/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_CMS_CONCURRENTMARKSWEEPGENERATION_HPP
#define SHARE_VM_GC_CMS_CONCURRENTMARKSWEEPGENERATION_HPP

#include "gc/cms/cmsOopClosures.hpp"
#include "gc/cms/gSpaceCounters.hpp"
#include "gc/cms/yieldingWorkgroup.hpp"
#include "gc/shared/cardGeneration.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcStats.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/generationCounters.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/taskqueue.hpp"
#include "memory/freeBlockDictionary.hpp"
#include "memory/iterator.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memoryService.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/stack.hpp"

// ConcurrentMarkSweepGeneration is in support of a concurrent
// mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Schenker
// style. We assume, for now, that this generation is always the
// seniormost generation and, for simplicity in the first implementation,
// that this generation is a single compactible space. Neither of these
// restrictions appears essential, and they will be relaxed in the future
// when more time is available to implement the greater generality (and
// there's a need for it).
//
// Concurrent mode failures are currently handled by
// means of a sliding mark-compact.

class AdaptiveSizePolicy;
class CMSCollector;
class CMSConcMarkingTask;
class CMSGCAdaptivePolicyCounters;
class CMSTracer;
class ConcurrentGCTimer;
class ConcurrentMarkSweepGeneration;
class ConcurrentMarkSweepPolicy;
class ConcurrentMarkSweepThread;
class CompactibleFreeListSpace;
class FreeChunk;
class ParNewGeneration;
class PromotionInfo;
class ScanMarkedObjectsAgainCarefullyClosure;
class TenuredGeneration;
class SerialOldTracer;

// A generic CMS bit map. It's the basis for both the CMS marking bit map
// as well as for the mod union table (in each case only a subset of the
// methods are used). This is essentially a wrapper around the BitMap class,
// with one bit per (1<<_shifter) HeapWords. (i.e. for the marking bit map,
// we have _shifter == 0, and for the mod union table we have
// _shifter == CardTableModRefBS::card_shift - LogHeapWordSize.)
// XXX 64-bit issues in BitMap?
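//
// For intuition, a minimal sketch (not part of the class interface below)
// of how an address maps to a bit position, assuming the conversion
// utilities behave as their names suggest:
//
//   size_t bit = (addr - _bmStartWord) >> _shifter;  // cf. heapWordToOffset(addr)
//
// With _shifter == 0 (marking bit map) every HeapWord gets its own bit;
// with _shifter == card_shift - LogHeapWordSize (mod union table) one bit
// covers a card's worth of HeapWords.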
class CMSBitMap VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;

  HeapWord*    _bmStartWord;   // base address of range covered by map
  size_t       _bmWordSize;    // map size (in #HeapWords covered)
  const int    _shifter;       // shifts to convert HeapWord to bit position
  VirtualSpace _virtual_space; // underlying the bit map
  BitMap       _bm;            // the bit map itself

  // If true, allocate the backing bit map so that it covers the YG in
  // addition to the OG and the PG. This is used to share the
  // underlying bit map (RAM) between the concurrent collector and the
  // parallel full GC under CMSParallelFullGC if
  // ShareCMSMarkBitMapWithParallelFullGC is true.
  bool _allocate_for_entire_heap;
 public:
  Mutex* const _lock;          // mutex protecting _bm;

 public:
  // constructor
  CMSBitMap(int shifter, int mutex_rank, const char* mutex_name,
            bool allocate_for_entire_heap = false);

  MemRegion vspace_mr() {
    assert(_virtual_space.reserved_size() == _virtual_space.committed_size(),
           "Must be fully committed by initialize().");
    return MemRegion((HeapWord*)_virtual_space.low_boundary(),
                     (HeapWord*)_virtual_space.high_boundary());
  }

  // allocates the actual storage for the map
  bool allocate(MemRegion mr);
  // field getter
  Mutex* lock() const { return _lock; }
  // locking verifier convenience function
  void assert_locked() const PRODUCT_RETURN;

  // inquiries
  HeapWord* startWord()   const { return _bmStartWord; }
  size_t    sizeInWords() const { return _bmWordSize; }
  size_t    sizeInBits()  const { return _bm.size(); }
  // the following is one past the last word in space
  HeapWord* endWord()     const { return _bmStartWord + _bmWordSize; }

  // reading marks
  bool isMarked(HeapWord* addr) const;
  bool par_isMarked(HeapWord* addr) const; // do not lock checks
  bool isUnmarked(HeapWord* addr) const;
  bool isAllClear() const;

  // writing marks
  void mark(HeapWord* addr);
  // For marking by parallel GC threads;
  // returns true if we did, false if another thread did
  bool par_mark(HeapWord* addr);

  void mark_range(MemRegion mr);
  void par_mark_range(MemRegion mr);
  void mark_large_range(MemRegion mr);
  void par_mark_large_range(MemRegion mr);
  void par_clear(HeapWord* addr); // For unmarking by parallel GC threads.
  void clear_range(MemRegion mr);
  void par_clear_range(MemRegion mr);
  void clear_large_range(MemRegion mr);
  void par_clear_large_range(MemRegion mr);
  void clear_all();
  void clear_all_incrementally(); // Not yet implemented!!
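  // A common claiming idiom, as an illustrative sketch only (the actual
  // call sites live in the collector's marking code; the names below are
  // placeholders): a parallel marker claims an object by atomically
  // setting its bit, and only the winning thread pushes it for scanning:
  //
  //   if (bit_map->par_mark(addr)) {
  //     work_queue->push(oop(addr));   // we won the race to mark it
  //   }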
  NOT_PRODUCT(
    // checks the memory region for validity
    void region_invariant(MemRegion mr);
  )

  // iteration
  void iterate(BitMapClosure* cl) {
    _bm.iterate(cl);
  }
  void iterate(BitMapClosure* cl, HeapWord* left, HeapWord* right);
  void dirty_range_iterate_clear(MemRegionClosure* cl);
  void dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl);

  // auxiliary support for iteration
  HeapWord* getNextMarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextMarkedWordAddress(HeapWord* start_addr,
                                     HeapWord* end_addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* start_addr,
                                       HeapWord* end_addr) const;
  MemRegion getAndClearMarkedRegion(HeapWord* addr);
  MemRegion getAndClearMarkedRegion(HeapWord* start_addr,
                                    HeapWord* end_addr);

  // conversion utilities
  HeapWord* offsetToHeapWord(size_t offset) const;
  size_t    heapWordToOffset(HeapWord* addr) const;
  size_t    heapWordDiffToOffsetDiff(size_t diff) const;

  void print_on_error(outputStream* st, const char* prefix) const;

  // debugging
  // is this address range covered by the bit-map?
  NOT_PRODUCT(
    bool covers(MemRegion mr) const;
    bool covers(HeapWord* start, size_t size = 0) const;
  )
  void verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) PRODUCT_RETURN;
};

// Represents a marking stack used by the CMS collector.
// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
class CMSMarkStack: public CHeapObj<mtGC> {
  friend class CMSCollector; // To get at expansion stats further below.

  VirtualSpace _virtual_space;    // Space for the stack
  oop*   _base;                   // Bottom of stack
  size_t _index;                  // One more than last occupied index
  size_t _capacity;               // Max #elements
  Mutex  _par_lock;               // An advisory lock used in case of parallel access
  NOT_PRODUCT(size_t _max_depth;) // Max depth plumbed during run

 protected:
  size_t _hit_limit;     // We hit max stack size limit
  size_t _failed_double; // We failed expansion before hitting limit

 public:
  CMSMarkStack():
    _par_lock(Mutex::event, "CMSMarkStack._par_lock", true,
              Monitor::_safepoint_check_never),
    _hit_limit(0),
    _failed_double(0) {}

  bool allocate(size_t size);

  size_t capacity() const { return _capacity; }

  oop pop() {
    if (!isEmpty()) {
      return _base[--_index];
    }
    return NULL;
  }

  bool push(oop ptr) {
    if (isFull()) {
      return false;
    } else {
      _base[_index++] = ptr;
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
      return true;
    }
  }

  bool isEmpty() const { return _index == 0; }
  bool isFull()  const {
    assert(_index <= _capacity, "buffer overflow");
    return _index == _capacity;
  }

  size_t length() { return _index; }

  // "Parallel versions" of some of the above
  oop par_pop() {
    // lock and pop
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return pop();
  }

  bool par_push(oop ptr) {
    // lock and push
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return push(ptr);
  }

  // Forcibly reset the stack, losing all of its contents.
  void reset() {
    _index = 0;
  }

  // Expand the stack, typically in response to an overflow condition.
  void expand();
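  // Illustrative-only sketch of how a caller might handle overflow (the
  // real policy lives in CMSCollector; the names here are hypothetical):
  //
  //   if (!mark_stack->push(obj)) {
  //     // Stack full: fall back, e.g. by pushing the object on the
  //     // collector's overflow list, and consider expand() at a safe
  //     // point before the stack is next used.
  //     collector->push_on_overflow_list(obj);
  //   }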
  // Compute the least valued stack element.
  oop least_value(HeapWord* low) {
    oop least = (oop)low;
    for (size_t i = 0; i < _index; i++) {
      least = MIN2(least, _base[i]);
    }
    return least;
  }

  // Exposed here to allow stack expansion in || case.
  Mutex* par_lock() { return &_par_lock; }
};

class CardTableRS;
class CMSParGCThreadState;

class ModUnionClosure: public MemRegionClosure {
 protected:
  CMSBitMap* _t;
 public:
  ModUnionClosure(CMSBitMap* t): _t(t) { }
  void do_MemRegion(MemRegion mr);
};

class ModUnionClosurePar: public ModUnionClosure {
 public:
  ModUnionClosurePar(CMSBitMap* t): ModUnionClosure(t) { }
  void do_MemRegion(MemRegion mr);
};

// Survivor Chunk Array in support of parallelization of
// Survivor Space rescan.
class ChunkArray: public CHeapObj<mtGC> {
  size_t _index;
  size_t _capacity;
  size_t _overflows;
  HeapWord** _array; // storage for array

 public:
  ChunkArray() : _index(0), _capacity(0), _overflows(0), _array(NULL) {}
  ChunkArray(HeapWord** a, size_t c):
    _index(0), _capacity(c), _overflows(0), _array(a) {}

  HeapWord** array() { return _array; }
  void set_array(HeapWord** a) { _array = a; }

  size_t capacity() { return _capacity; }
  void set_capacity(size_t c) { _capacity = c; }

  size_t end() {
    assert(_index <= capacity(),
           err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT "): out of bounds",
                   _index, _capacity));
    return _index;
  } // exclusive

  HeapWord* nth(size_t n) {
    assert(n < end(), "Out of bounds access");
    return _array[n];
  }

  void reset() {
    _index = 0;
    if (_overflows > 0 && PrintCMSStatistics > 1) {
      warning("CMS: ChunkArray[" SIZE_FORMAT "] overflowed " SIZE_FORMAT " times",
              _capacity, _overflows);
    }
    _overflows = 0;
  }

  void record_sample(HeapWord* p, size_t sz) {
    // For now we do not do anything with the size
    if (_index < _capacity) {
      _array[_index++] = p;
    } else {
      ++_overflows;
      assert(_index == _capacity,
             err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT
                     "): out of bounds at overflow#" SIZE_FORMAT,
                     _index, _capacity, _overflows));
    }
  }
};

//
// Timing, allocation and promotion statistics for gc scheduling and incremental
// mode pacing. Most statistics are exponential averages.
//
class CMSStats VALUE_OBJ_CLASS_SPEC {
 private:
  ConcurrentMarkSweepGeneration* const _cms_gen; // The cms (old) gen.

  // The following are exponential averages with factor alpha:
  //   avg = ((100 - alpha) * avg + alpha * cur_sample) / 100
  //
  // The durations measure: end_time[n] - start_time[n]
  // The periods measure:   start_time[n] - start_time[n-1]
  //
  // The cms period and duration include only concurrent collections; time spent
  // in foreground cms collections due to System.gc() or because of a failure to
  // keep up are not included.
  //
  // There are 3 alphas to "bootstrap" the statistics. The _saved_alpha is the
  // real value, but is used only after the first period. A value of 100 is
  // used for the first sample so it gets the entire weight.
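  //
  // For example (illustrative numbers only): with alpha == 25, a previous
  // avg of 80ms and a new sample of 40ms,
  //   avg = (75 * 80 + 25 * 40) / 100 = 70ms,
  // i.e. each new sample moves the average a quarter of the way toward it.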
  unsigned int _saved_alpha; // 0-100
  unsigned int _gc0_alpha;
  unsigned int _cms_alpha;

  double _gc0_duration;
  double _gc0_period;
  size_t _gc0_promoted;           // bytes promoted per gc0
  double _cms_duration;
  double _cms_duration_pre_sweep; // time from initiation to start of sweep
  double _cms_period;
  size_t _cms_allocated;          // bytes of direct allocation per gc0 period

  // Timers.
  elapsedTimer _cms_timer;
  TimeStamp    _gc0_begin_time;
  TimeStamp    _cms_begin_time;
  TimeStamp    _cms_end_time;

  // Snapshots of the amount used in the CMS generation.
  size_t _cms_used_at_gc0_begin;
  size_t _cms_used_at_gc0_end;
  size_t _cms_used_at_cms_begin;

  // Used to prevent the duty cycle from being reduced in the middle of a cms
  // cycle.
  bool _allow_duty_cycle_reduction;

  enum {
    _GC0_VALID = 0x1,
    _CMS_VALID = 0x2,
    _ALL_VALID = _GC0_VALID | _CMS_VALID
  };

  unsigned int _valid_bits;

 protected:
  // In support of adjusting of cms trigger ratios based on history
  // of concurrent mode failure.
  double cms_free_adjustment_factor(size_t free) const;
  void adjust_cms_free_adjustment_factor(bool fail, size_t free);

 public:
  CMSStats(ConcurrentMarkSweepGeneration* cms_gen,
           unsigned int alpha = CMSExpAvgFactor);

  // Whether or not the statistics contain valid data; higher level statistics
  // cannot be called until this returns true (they require at least one young
  // gen and one cms cycle to have completed).
  bool valid() const;

  // Record statistics.
  void record_gc0_begin();
  void record_gc0_end(size_t cms_gen_bytes_used);
  void record_cms_begin();
  void record_cms_end();

  // Allow management of the cms timer, which must be stopped/started around
  // yield points.
  elapsedTimer& cms_timer() { return _cms_timer; }
  void start_cms_timer()    { _cms_timer.start(); }
  void stop_cms_timer()     { _cms_timer.stop(); }

  // Basic statistics; units are seconds or bytes.
  double gc0_period() const    { return _gc0_period; }
  double gc0_duration() const  { return _gc0_duration; }
  size_t gc0_promoted() const  { return _gc0_promoted; }
  double cms_period() const    { return _cms_period; }
  double cms_duration() const  { return _cms_duration; }
  size_t cms_allocated() const { return _cms_allocated; }

  size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end; }

  // Seconds since the last background cms cycle began or ended.
  double cms_time_since_begin() const;
  double cms_time_since_end() const;

  // Higher level statistics--caller must check that valid() returns true before
  // calling.

  // Returns bytes promoted per second of wall clock time.
  double promotion_rate() const;

  // Returns bytes directly allocated per second of wall clock time.
  double cms_allocation_rate() const;

  // Rate at which space in the cms generation is being consumed (sum of the
  // above two).
  double cms_consumption_rate() const;

  // Returns an estimate of the number of seconds until the cms generation will
  // fill up, assuming no collection work is done.
  double time_until_cms_gen_full() const;

  // Returns an estimate of the number of seconds remaining until
  // the cms generation collection should start.
  double time_until_cms_start() const;

  // End of higher level statistics.
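  //
  // Roughly (an illustrative sketch, not the exact implementation, which
  // also pads the estimates):
  //
  //   cms_consumption_rate()    ~ promotion_rate() + cms_allocation_rate()
  //   time_until_cms_gen_full() ~ cms_free_bytes / cms_consumption_rate()
  //   time_until_cms_start()    ~ time_until_cms_gen_full() - expected cycle
  //                               duration, so a cycle started then should
  //                               finish just before the generation fills up.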
  // Debugging.
  void print_on(outputStream* st) const PRODUCT_RETURN;
  void print() const { print_on(gclog_or_tty); }
};

// A closure related to weak references processing which
// we embed in the CMSCollector, since we need to pass
// it to the reference processor for secondary filtering
// of references based on reachability of referent;
// see role of _is_alive_non_header closure in the
// ReferenceProcessor class.
// For objects in the CMS generation, this closure checks
// if the object is "live" (reachable). Used in weak
// reference processing.
class CMSIsAliveClosure: public BoolObjectClosure {
  const MemRegion  _span;
  const CMSBitMap* _bit_map;

  friend class CMSCollector;
 public:
  CMSIsAliveClosure(MemRegion span,
                    CMSBitMap* bit_map):
    _span(span),
    _bit_map(bit_map) {
    assert(!span.is_empty(), "Empty span could spell trouble");
  }

  bool do_object_b(oop obj);
};


// Implements AbstractRefProcTaskExecutor for CMS.
class CMSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
 public:

  CMSRefProcTaskExecutor(CMSCollector& collector)
    : _collector(collector)
  { }

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
 private:
  CMSCollector& _collector;
};


class CMSCollector: public CHeapObj<mtGC> {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweepGeneration;
  friend class CompactibleFreeListSpace;
  friend class CMSParMarkTask;
  friend class CMSParInitialMarkTask;
  friend class CMSParRemarkTask;
  friend class CMSConcMarkingTask;
  friend class CMSRefProcTaskProxy;
  friend class CMSRefProcTaskExecutor;
  friend class ScanMarkedObjectsAgainCarefullyClosure;  // for sampling eden
  friend class SurvivorSpacePrecleanClosure;            // --- ditto -------
  friend class PushOrMarkClosure;             // to access _restart_addr
  friend class Par_PushOrMarkClosure;         // to access _restart_addr
  friend class MarkFromRootsClosure;          //  -- ditto --
                                              // ... and for clearing cards
  friend class Par_MarkFromRootsClosure;      //  to access _restart_addr
                                              // ... and for clearing cards
  friend class Par_ConcMarkingClosure;        //  to access _restart_addr etc.
  friend class MarkFromRootsVerifyClosure;    //  to access _restart_addr
  friend class PushAndMarkVerifyClosure;      //  -- ditto --
  friend class MarkRefsIntoAndScanClosure;    //  to access _overflow_list
  friend class PushAndMarkClosure;            //  -- ditto --
  friend class Par_PushAndMarkClosure;        //  -- ditto --
  friend class CMSKeepAliveClosure;           //  -- ditto --
  friend class CMSDrainMarkingStackClosure;   //  -- ditto --
  friend class CMSInnerParMarkAndPushClosure; //  -- ditto --
  NOT_PRODUCT(friend class ScanMarkedObjectsAgainClosure;) // assertion on _overflow_list
  friend class ReleaseForegroundGC;  // to access _foregroundGCShouldWait
  friend class VM_CMS_Operation;
  friend class VM_CMS_Initial_Mark;
  friend class VM_CMS_Final_Remark;
  friend class TraceCMSMemoryManagerStats;

 private:
  jlong _time_of_last_gc;
  void update_time_of_last_gc(jlong now) {
    _time_of_last_gc = now;
  }

  OopTaskQueueSet* _task_queues;

  // Overflow list of grey objects, threaded through mark-word
  // Manipulated with CAS in the parallel/multi-threaded case.
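  // A sketch of the (sequential) push protocol, for intuition only -- the
  // real code in the .cpp also provides a CAS-based parallel variant:
  //
  //   preserve_mark_if_necessary(p);        // save a non-trivial mark word
  //   p->set_mark((markOop)_overflow_list); // thread p onto the list head
  //   _overflow_list = p;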
  oop _overflow_list;
  // The following array-pair keeps track of mark words
  // displaced for accommodating overflow list above.
  // This code will likely be revisited under RFE#4922830.
  Stack<oop, mtGC>     _preserved_oop_stack;
  Stack<markOop, mtGC> _preserved_mark_stack;

  int* _hash_seed;

  // In support of multi-threaded concurrent phases
  YieldingFlexibleWorkGang* _conc_workers;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Initialization Errors
  bool _completed_initialization;

  // In support of ExplicitGCInvokesConcurrent
  static bool _full_gc_requested;
  static GCCause::Cause _full_gc_cause;
  unsigned int _collection_count_start;

  // Should we unload classes this concurrent cycle?
  bool _should_unload_classes;
  unsigned int _concurrent_cycles_since_last_unload;
  unsigned int concurrent_cycles_since_last_unload() const {
    return _concurrent_cycles_since_last_unload;
  }
  // Did we (allow) unload classes in the previous concurrent cycle?
  bool unloaded_classes_last_cycle() const {
    return concurrent_cycles_since_last_unload() == 0;
  }
  // Root scanning options for perm gen
  int _roots_scanning_options;
  int roots_scanning_options() const      { return _roots_scanning_options; }
  void add_root_scanning_option(int o)    { _roots_scanning_options |= o; }
  void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o; }

  // Verification support
  CMSBitMap _verification_mark_bm;
  void verify_after_remark_work_1();
  void verify_after_remark_work_2();

  // True if any verification flag is on.
  bool _verifying;
  bool verifying() const     { return _verifying; }
  void set_verifying(bool v) { _verifying = v; }

  // Collector policy
  ConcurrentMarkSweepPolicy* _collector_policy;
  ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }

  void set_did_compact(bool v);

  // XXX Move these to CMSStats ??? FIX ME !!!
  elapsedTimer _inter_sweep_timer; // Time between sweeps
  elapsedTimer _intra_sweep_timer; // Time _in_ sweeps
  // Padded decaying average estimates of the above
  AdaptivePaddedAverage _inter_sweep_estimate;
  AdaptivePaddedAverage _intra_sweep_estimate;

  CMSTracer*         _gc_tracer_cm;
  ConcurrentGCTimer* _gc_timer_cm;

  bool _cms_start_registered;

  GCHeapSummary    _last_heap_summary;
  MetaspaceSummary _last_metaspace_summary;

  void register_gc_start(GCCause::Cause cause);
  void register_gc_end();
  void save_heap_summary();
  void report_heap_summary(GCWhen::Type when);

 protected:
  ConcurrentMarkSweepGeneration* _cmsGen; // Old gen (CMS)
  MemRegion                      _span;   // Span covering above two
  CardTableRS*                   _ct;     // Card table

  // CMS marking support structures
  CMSBitMap     _markBitMap;
  CMSBitMap     _modUnionTable;
  CMSMarkStack  _markStack;

  HeapWord*     _restart_addr; // In support of marking stack overflow
  void          lower_restart_addr(HeapWord* low);

  // Counters in support of marking stack / work queue overflow handling:
  // a non-zero value indicates certain types of overflow events during
  // the current CMS cycle and could lead to stack resizing efforts at
  // an opportune future time.
  size_t _ser_pmc_preclean_ovflw;
  size_t _ser_pmc_remark_ovflw;
  size_t _par_pmc_remark_ovflw;
  size_t _ser_kac_preclean_ovflw;
  size_t _ser_kac_ovflw;
  size_t _par_kac_ovflw;
  NOT_PRODUCT(ssize_t _num_par_pushes;)

  // ("Weak") Reference processing support.
  ReferenceProcessor* _ref_processor;
  CMSIsAliveClosure   _is_alive_closure;
  // Keep this textually after _markBitMap and _span; c'tor dependency.

  ConcurrentMarkSweepThread* _cmsThread; // The thread doing the work
  ModUnionClosurePar _modUnionClosurePar;

  // CMS abstract state machine
  // initial_state: Idling
  // next_state(Idling) = {Marking}
  // next_state(Marking) = {Precleaning, Sweeping}
  // next_state(Precleaning) = {AbortablePreclean, FinalMarking}
  // next_state(AbortablePreclean) = {FinalMarking}
  // next_state(FinalMarking) = {Sweeping}
  // next_state(Sweeping) = {Resizing}
  // next_state(Resizing) = {Resetting}
  // next_state(Resetting) = {Idling}
  // The numeric values below are chosen so that:
  // . _collectorState <= Idling == post-sweep && pre-mark
  // . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
  //                                            precleaning || abortablePreclean
 public:
  enum CollectorState {
    Resizing          = 0,
    Resetting         = 1,
    Idling            = 2,
    InitialMarking    = 3,
    Marking           = 4,
    Precleaning       = 5,
    AbortablePreclean = 6,
    FinalMarking      = 7,
    Sweeping          = 8
  };
 protected:
  static CollectorState _collectorState;

  // State related to prologue/epilogue invocation for my generations
  bool _between_prologue_and_epilogue;

  // Signaling/State related to coordination between fore- and background GC
  // Note: When the baton has been passed from background GC to foreground GC,
  // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
  static bool _foregroundGCIsActive;   // true iff foreground collector is active or
                                       // wants to go active
  static bool _foregroundGCShouldWait; // true iff background GC is active and has not
                                       // yet passed the baton to the foreground GC

  // Support for CMSScheduleRemark (abortable preclean)
  bool _abort_preclean;
  bool _start_sampling;

  int    _numYields;
  size_t _numDirtyCards;
  size_t _sweep_count;

  // Occupancy used for bootstrapping stats
  double _bootstrap_occupancy;

  // Timer
  elapsedTimer _timer;

  // Timing, allocation and promotion statistics, used for scheduling.
  CMSStats _stats;

  enum CMS_op_type {
    CMS_op_checkpointRootsInitial,
    CMS_op_checkpointRootsFinal
  };

  void do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause);
  bool stop_world_and_do(CMS_op_type op);

  OopTaskQueueSet* task_queues() { return _task_queues; }
  int* hash_seed(int i)          { return &_hash_seed[i]; }
  YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; }

  // Support for parallelizing Eden rescan in CMS remark phase
  void sample_eden(); // ... sample Eden space top
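  // Illustrative sketch of the sampling step, for intuition only (the real
  // version also guards against races with the young-gen mutators and takes
  // _eden_chunk_lock where needed):
  //
  //   if (_eden_chunk_index < _eden_chunk_capacity) {
  //     _eden_chunk_array[_eden_chunk_index++] = *_top_addr; // current Eden top
  //   }
  //
  // The recorded tops later bound the per-thread strides of the remark rescan.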
 private:
  // Support for parallelizing young gen rescan in CMS remark phase
  ParNewGeneration* _young_gen; // the younger gen

  HeapWord** _top_addr;            // ... Top of Eden
  HeapWord** _end_addr;            // ... End of Eden
  Mutex*     _eden_chunk_lock;
  HeapWord** _eden_chunk_array;    // ... Eden partitioning array
  size_t     _eden_chunk_index;    // ... top (exclusive) of array
  size_t     _eden_chunk_capacity; // ... max entries in array

  // Support for parallelizing survivor space rescan
  HeapWord**  _survivor_chunk_array;
  size_t      _survivor_chunk_index;
  size_t      _survivor_chunk_capacity;
  size_t*     _cursor;
  ChunkArray* _survivor_plab_array;

  // A bounded minimum size of PLABs, should not return too small values since
  // this will affect the size of the data structures used for parallel young gen rescan
  size_t plab_sample_minimum_size();

  // Support for marking stack overflow handling
  bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
  bool par_take_from_overflow_list(size_t num,
                                   OopTaskQueue* to_work_q,
                                   int no_of_gc_threads);
  void push_on_overflow_list(oop p);
  void par_push_on_overflow_list(oop p);
  // The following is, obviously, not, in general, "MT-stable"
  bool overflow_list_is_empty() const;

  void preserve_mark_if_necessary(oop p);
  void par_preserve_mark_if_necessary(oop p);
  void preserve_mark_work(oop p, markOop m);
  void restore_preserved_marks_if_any();
  NOT_PRODUCT(bool no_preserved_marks() const;)
  // In support of testing overflow code
  NOT_PRODUCT(int _overflow_counter;)
  NOT_PRODUCT(bool simulate_overflow();)     // Sequential
  NOT_PRODUCT(bool par_simulate_overflow();) // MT version

  // CMS work methods
  void checkpointRootsInitialWork(); // Initial checkpoint work

  // A return value of false indicates failure due to stack overflow
  bool markFromRootsWork(); // Concurrent marking work

 public:   // FIX ME!!! only for testing
  bool do_marking_st(); // Single-threaded marking
  bool do_marking_mt(); // Multi-threaded marking

 private:

  // Concurrent precleaning work
  size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
                                  ScanMarkedObjectsAgainCarefullyClosure* cl);
  size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
                             ScanMarkedObjectsAgainCarefullyClosure* cl);
  // Does precleaning work, returning a quantity indicative of
  // the amount of "useful work" done.
  size_t preclean_work(bool clean_refs, bool clean_survivors);
  void preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock);
  void abortable_preclean(); // Preclean while looking for possible abort
  void initialize_sequential_subtasks_for_young_gen_rescan(int i);
  // Helper function for above; merge-sorts the per-thread plab samples
  void merge_survivor_plab_arrays(ContiguousSpace* surv, int no_of_gc_threads);
  // Resets (i.e. clears) the per-thread plab sample vectors
  void reset_survivor_plab_arrays();

  // Final (second) checkpoint work
  void checkpointRootsFinalWork();
  // Work routine for parallel version of remark
  void do_remark_parallel();
  // Work routine for non-parallel version of remark
  void do_remark_non_parallel();
  // Reference processing work routine (during second checkpoint)
  void refProcessingWork();

  // Concurrent sweeping work
  void sweepWork(ConcurrentMarkSweepGeneration* gen);

  // (Concurrent) resetting of support data structures
  void reset(bool concurrent);

  // Clear _expansion_cause fields of constituent generations
  void clear_expansion_cause();

  // An auxiliary method used to record the ends of
  // used regions of each generation to limit the extent of sweep
  void save_sweep_limits();

  // A work method used by the foreground collector to do
  // a mark-sweep-compact.
  void do_compaction_work(bool clear_all_soft_refs);

  // Work methods for reporting concurrent mode interruption or failure
  bool is_external_interruption();
  void report_concurrent_mode_interruption();

  // If the background GC is active, acquire control from the background
  // GC and do the collection.
  void acquire_control_and_collect(bool full, bool clear_all_soft_refs);

  // For synchronizing passing of control from background to foreground
  // GC. waitForForegroundGC() is called by the background
  // collector. If it had to wait for a foreground collection,
  // it returns true and the background collection should assume
  // that the collection was finished by the foreground
  // collector.
  bool waitForForegroundGC();

  size_t block_size_using_printezis_bits(HeapWord* addr) const;
  size_t block_size_if_printezis_bits(HeapWord* addr) const;
  HeapWord* next_card_start_after_block(HeapWord* addr) const;

  void setup_cms_unloading_and_verification_state();
 public:
  CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
               CardTableRS* ct,
               ConcurrentMarkSweepPolicy* cp);
  ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }

  ReferenceProcessor* ref_processor() { return _ref_processor; }
  void ref_processor_init();

  Mutex* bitMapLock() const { return _markBitMap.lock(); }
  static CollectorState abstract_state() { return _collectorState; }

  bool should_abort_preclean() const; // Whether preclean should be aborted.
  size_t get_eden_used() const;
  size_t get_eden_capacity() const;

  ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }

  // Locking checks
  NOT_PRODUCT(static bool have_cms_token();)

  bool shouldConcurrentCollect();

  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);
  void collect_in_background(GCCause::Cause cause);

  // In support of ExplicitGCInvokesConcurrent
  static void request_full_gc(unsigned int full_gc_count, GCCause::Cause cause);
  // Should we unload classes in a particular concurrent cycle?
  bool should_unload_classes() const {
    return _should_unload_classes;
  }
  void update_should_unload_classes();

  void direct_allocated(HeapWord* start, size_t size);

  // Object is dead if not marked and current phase is sweeping.
  bool is_dead_obj(oop obj) const;

  // After a promotion (of "start"), do any necessary marking.
  // If "par", then it's being done by a parallel GC thread.
  // The last two args indicate if we need precise marking
  // and if so the size of the object so it can be dirtied
  // in its entirety.
  void promoted(bool par, HeapWord* start,
                bool is_obj_array, size_t obj_size);

  void getFreelistLocks() const;
  void releaseFreelistLocks() const;
  bool haveFreelistLocks() const;

  // Adjust size of underlying generation
  void compute_new_size();

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_epilogue(bool full);

  jlong time_of_last_gc(jlong now) {
    if (_collectorState <= Idling) {
      // gc not in progress
      return _time_of_last_gc;
    } else {
      // collection in progress
      return now;
    }
  }

  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num);
  void sample_eden_chunk();

  CMSBitMap* markBitMap() { return &_markBitMap; }
  void directAllocated(HeapWord* start, size_t size);

  // Main CMS steps and related support
  void checkpointRootsInitial();
  bool markFromRoots();  // a return value of false indicates failure
                         // due to stack overflow
  void preclean();
  void checkpointRootsFinal();
  void sweep();

  // Check that the currently executing thread is the expected
  // one (foreground collector or background collector).
  static void check_correct_thread_executing() PRODUCT_RETURN;

  bool is_cms_reachable(HeapWord* addr);

  // Performance Counter Support
  CollectorCounters* counters() { return _gc_counters; }

  // Timer stuff
  void startTimer()   { assert(!_timer.is_active(), "Error"); _timer.start(); }
  void stopTimer()    { assert( _timer.is_active(), "Error"); _timer.stop();  }
  void resetTimer()   { assert(!_timer.is_active(), "Error"); _timer.reset(); }
  double timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); }

  int  yields()          { return _numYields; }
  void resetYields()     { _numYields = 0;    }
  void incrementYields() { _numYields++;      }
  void resetNumDirtyCards()               { _numDirtyCards = 0; }
  void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; }
  size_t numDirtyCards()                  { return _numDirtyCards; }

  static bool foregroundGCShouldWait()           { return _foregroundGCShouldWait; }
  static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; }
  static bool foregroundGCIsActive()             { return _foregroundGCIsActive; }
  static void set_foregroundGCIsActive(bool v)   { _foregroundGCIsActive = v; }
  size_t sweep_count() const     { return _sweep_count; }
  void   increment_sweep_count() { _sweep_count++; }

  // Timers/stats for gc scheduling and incremental mode pacing.
  CMSStats& stats() { return _stats; }

  // Adaptive size policy
  AdaptiveSizePolicy* size_policy();

  static void print_on_error(outputStream* st);

  // Debugging
  void verify();
  bool verify_after_remark(bool silent = VerifySilently);
  void verify_ok_to_terminate() const PRODUCT_RETURN;
  void verify_work_stacks_empty() const PRODUCT_RETURN;
  void verify_overflow_empty() const PRODUCT_RETURN;

  // Convenience methods in support of debugging
  static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
  HeapWord* block_start(const void* p) const PRODUCT_RETURN0;

  // Accessors
  CMSMarkStack* verification_mark_stack() { return &_markStack; }
  CMSBitMap*    verification_mark_bm()    { return &_verification_mark_bm; }

  // Initialization errors
  bool completed_initialization() { return _completed_initialization; }

  void print_eden_and_survivor_chunk_arrays();
};

class CMSExpansionCause : public AllStatic  {
 public:
  enum Cause {
    _no_expansion,
    _satisfy_free_ratio,
    _satisfy_promotion,
    _satisfy_allocation,
    _allocate_par_lab,
    _allocate_par_spooling_space,
    _adaptive_size_policy
  };
  // Return a string describing the cause of the expansion.
  static const char* to_string(CMSExpansionCause::Cause cause);
};

class ConcurrentMarkSweepGeneration: public CardGeneration {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweep;
  friend class CMSCollector;
 protected:
  static CMSCollector*      _collector; // the collector that collects us
  CompactibleFreeListSpace* _cmsSpace;  // underlying space (only one for now)

  // Performance Counters
  GenerationCounters* _gen_counters;
  GSpaceCounters*     _space_counters;

  // Words directly allocated, used by CMSStats.
  size_t _direct_allocated_words;

  // Non-product stat counters
  NOT_PRODUCT(
    size_t _numObjectsPromoted;
    size_t _numWordsPromoted;
    size_t _numObjectsAllocated;
    size_t _numWordsAllocated;
  )

  // Used for sizing decisions
  bool _incremental_collection_failed;
  bool incremental_collection_failed() {
    return _incremental_collection_failed;
  }
  void set_incremental_collection_failed() {
    _incremental_collection_failed = true;
  }
  void clear_incremental_collection_failed() {
    _incremental_collection_failed = false;
  }

  // accessors
  void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v; }
  CMSExpansionCause::Cause expansion_cause() const     { return _expansion_cause; }

  // Accessing spaces
  CompactibleSpace* space() const { return (CompactibleSpace*)_cmsSpace; }

 private:
  // For parallel young-gen GC support.
  CMSParGCThreadState** _par_gc_thread_states;

  // Reason generation was expanded
  CMSExpansionCause::Cause _expansion_cause;

  // In support of MinChunkSize being larger than min object size
  const double _dilatation_factor;

  // True if a compacting collection was done.
  bool _did_compact;
  bool did_compact() { return _did_compact; }
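  // How _initiating_occupancy (declared below) is derived -- a hedged
  // sketch only, based on init_initiating_occupancy()'s signature: it is
  // expected to use an explicit occupancy percentage (io) when one is
  // given (non-negative), and otherwise to fall back on a value computed
  // from MinHeapFreeRatio and the trigger ratio (tr), roughly
  //
  //   _initiating_occupancy = ((100 - MinHeapFreeRatio) +
  //                            (double)(tr * MinHeapFreeRatio) / 100.0) / 100.0;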
  // Fraction of current occupancy at which to start a CMS collection which
  // will collect this generation (at least).
  double _initiating_occupancy;

 protected:
  // Shrink generation by specified size (returns false if unable to shrink)
  void shrink_free_list_by(size_t bytes);

  // Update statistics for GC
  virtual void update_gc_stats(Generation* current_generation, bool full);

  // Maximum available space in the generation (including uncommitted)
  // space.
  size_t max_available() const;

  // getter and initializer for _initiating_occupancy field.
  double initiating_occupancy() const { return _initiating_occupancy; }
  void   init_initiating_occupancy(intx io, uintx tr);

  void expand_for_gc_cause(size_t bytes, size_t expand_bytes, CMSExpansionCause::Cause cause);

  void assert_correct_size_change_locking();

 public:
  ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                CardTableRS* ct,
                                bool use_adaptive_freelists,
                                FreeBlockDictionary<FreeChunk>::DictionaryChoice);

  // Accessors
  static CMSCollector* collector() { return _collector; }
  static void set_collector(CMSCollector* collector) {
    assert(_collector == NULL, "already set");
    _collector = collector;
  }
  CompactibleFreeListSpace* cmsSpace() const { return _cmsSpace; }

  Mutex* freelistLock() const;

  virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }

  void set_did_compact(bool v) { _did_compact = v; }

  bool refs_discovery_is_atomic() const { return false; }
  bool refs_discovery_is_mt()     const {
    // Note: CMS does MT-discovery during the parallel-remark
    // phases. Use ReferenceProcessorMTMutator to make refs
    // discovery MT-safe during such phases or other parallel
    // discovery phases in the future. This may all go away
    // if/when we decide that refs discovery is sufficiently
    // rare that the cost of the CAS's involved is in the
    // noise. That's a measurement that should be done, and
    // the code simplified if that turns out to be the case.
    return ConcGCThreads > 1;
  }

  // Override
  virtual void ref_processor_init();

  void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }

  // Space enquiries
  double occupancy() const { return ((double)used())/((double)capacity()); }
  size_t contiguous_available() const;
  size_t unsafe_max_alloc_nogc() const;

  // over-rides
  MemRegion used_region_at_save_marks() const;

  // Does a "full" (forced) collection invoked on this generation collect
  // all younger generations as well? Note that the second conjunct is a
  // hack to allow the collection of the younger gen first if the flag is
  // set.
  virtual bool full_collects_younger_generations() const {
    return !ScavengeBeforeFullGC;
  }

  // Adjust quantities in the generation affected by
  // the compaction.
  void reset_after_compaction();

  // Allocation support
  HeapWord* allocate(size_t size, bool tlab);
  HeapWord* have_lock_and_allocate(size_t size, bool tlab);
  oop       promote(oop obj, size_t obj_size);
  HeapWord* par_allocate(size_t size, bool tlab) {
    return allocate(size, tlab);
  }


  // Used by CMSStats to track direct allocation. The value is sampled and
  // reset after each young gen collection.
  size_t direct_allocated_words() const { return _direct_allocated_words; }
  void reset_direct_allocated_words()   { _direct_allocated_words = 0; }

  // Overrides for parallel promotion.
  virtual oop par_promote(int thread_num,
                          oop obj, markOop m, size_t word_sz);
  virtual void par_promote_alloc_done(int thread_num);
  virtual void par_oop_since_save_marks_iterate_done(int thread_num);

  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const;

  // Inform this (non-young) generation that a promotion failure was
  // encountered during a collection of a younger generation that
  // promotes into this generation.
  virtual void promotion_failure_occurred();

  bool should_collect(bool full, size_t size, bool tlab);
  virtual bool should_concurrent_collect() const;
  virtual bool is_too_full() const;
  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);

  HeapWord* expand_and_allocate(size_t word_size,
                                bool tlab,
                                bool parallel = false);

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_prologue_work(bool full, bool registerClosure,
                        ModUnionClosure* modUnionClosure);
  void gc_epilogue(bool full);
  void gc_epilogue_work(bool full);

  // Time since last GC of this generation
  jlong time_of_last_gc(jlong now) {
    return collector()->time_of_last_gc(now);
  }
  void update_time_of_last_gc(jlong now) {
    collector()->update_time_of_last_gc(now);
  }

  // Allocation failure
  void shrink(size_t bytes);
  HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
  bool expand_and_ensure_spooling_space(PromotionInfo* promo);

  // Iteration support and related enquiries
  void save_marks();
  bool no_allocs_since_save_marks();

  // Iteration support specific to CMS generations
  void save_sweep_limit();

  // More iteration support
  virtual void oop_iterate(ExtendedOopClosure* cl);
  virtual void safe_object_iterate(ObjectClosure* cl);
  virtual void object_iterate(ObjectClosure* cl);

  // Need to declare the full complement of closures, whether we'll
  // override them or not, or get message from the compiler:
  //   oop_since_save_marks_iterate_nv hides virtual function...
#define CMS_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DECL)
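  // For illustration (a hedged sketch -- the exact closure list is defined
  // by ALL_SINCE_SAVE_MARKS_CLOSURES elsewhere): one instantiation of the
  // macro, e.g. CMS_SINCE_SAVE_MARKS_DECL(OopsInGenClosure, _v), expands
  // via token pasting to
  //
  //   void oop_since_save_marks_iterate_v(OopsInGenClosure* cl);
  //
  // giving this generation one declaration per (closure type, suffix) pair.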
  // Smart allocation XXX -- move to CFLSpace?
  void setNearLargestChunk();
  bool isNearLargestChunk(HeapWord* addr);

  // Get the chunk at the end of the space. Delegates to
  // the space.
  FreeChunk* find_chunk_at_end();

  void post_compact();

  // Debugging
  void prepare_for_verify();
  void verify();
  void print_statistics() PRODUCT_RETURN;

  // Performance Counters support
  virtual void update_counters();
  virtual void update_counters(size_t used);
  void initialize_performance_counters();
  CollectorCounters* counters() { return collector()->counters(); }

  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num) {
    // Delegate to collector
    return collector()->get_data_recorder(thr_num);
  }
  void sample_eden_chunk() {
    // Delegate to collector
    return collector()->sample_eden_chunk();
  }

  // Printing
  const char* name() const;
  virtual const char* short_name() const { return "CMS"; }
  void print() const;
  void printOccupancy(const char* s);

  // Resize the generation after a compacting GC. The
  // generation can be treated as a contiguous space
  // after the compaction.
  virtual void compute_new_size();
  // Resize the generation after a non-compacting
  // collection.
  void compute_new_size_free_list();
};

//
// Closures of various sorts used by CMS to accomplish its work
//

// This closure is used to do concurrent marking from the roots
// following the first checkpoint.
class MarkFromRootsClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _bitMap;
  CMSBitMap*     _mut;
  CMSMarkStack*  _markStack;
  bool           _yield;
  int            _skipBits;
  HeapWord*      _finger;
  HeapWord*      _threshold;
  DEBUG_ONLY(bool _verifying;)

 public:
  MarkFromRootsClosure(CMSCollector* collector, MemRegion span,
                       CMSBitMap* bitMap,
                       CMSMarkStack* markStack,
                       bool should_yield, bool verifying = false);
  bool do_bit(size_t offset);
  void reset(HeapWord* addr);
  inline void do_yield_check();

 private:
  void scanOopsInOop(HeapWord* ptr);
  void do_yield_work();
};

// This closure is used to do concurrent multi-threaded
// marking from the roots following the first checkpoint.
// XXX This should really be a subclass of the serial version
// above, but I have not had the time to refactor things cleanly.
class Par_MarkFromRootsClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _whole_span;
  MemRegion      _span;
  CMSBitMap*     _bit_map;
  CMSBitMap*     _mut;
  OopTaskQueue*  _work_queue;
  CMSMarkStack*  _overflow_stack;
  int            _skip_bits;
  HeapWord*      _finger;
  HeapWord*      _threshold;
  CMSConcMarkingTask* _task;
 public:
  Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
                           MemRegion span,
                           CMSBitMap* bit_map,
                           OopTaskQueue* work_queue,
                           CMSMarkStack* overflow_stack);
  bool do_bit(size_t offset);
  inline void do_yield_check();

 private:
  void scan_oops_in_oop(HeapWord* ptr);
  void do_yield_work();
  bool get_work_from_overflow_stack();
};
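// The finger-based traversal these closures implement, as an illustrative
// sketch only (the real do_bit()/scanOopsInOop() also handle yielding,
// marking-stack overflow and the mod-union table):
//
//   bool do_bit(size_t offset) {
//     HeapWord* addr = _bitMap->startWord() + offset; // marked object found
//     _finger = addr;                                 // advance the finger
//     scanOopsInOop(addr);  // mark referents' bits; referents at addresses
//                           // the iteration has already passed are pushed
//                           // on _markStack for later draining, those ahead
//                           // are picked up when the scan reaches their bit
//     return true;          // continue the bit-map iteration
//   }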
// The following closures are used to do certain kinds of verification of
// CMS marking.
class PushAndMarkVerifyClosure: public MetadataAwareOopClosure {
  CMSCollector*    _collector;
  MemRegion        _span;
  CMSBitMap*       _verification_bm;
  CMSBitMap*       _cms_bm;
  CMSMarkStack*    _mark_stack;
 protected:
  void do_oop(oop p);
  template <class T> inline void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    do_oop(obj);
  }
 public:
  PushAndMarkVerifyClosure(CMSCollector* cms_collector,
                           MemRegion span,
                           CMSBitMap* verification_bm,
                           CMSBitMap* cms_bm,
                           CMSMarkStack* mark_stack);
  void do_oop(oop* p);
  void do_oop(narrowOop* p);

  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
};

class MarkFromRootsVerifyClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _verification_bm;
  CMSBitMap*     _cms_bm;
  CMSMarkStack*  _mark_stack;
  HeapWord*      _finger;
  PushAndMarkVerifyClosure _pam_verify_closure;
 public:
  MarkFromRootsVerifyClosure(CMSCollector* collector, MemRegion span,
                             CMSBitMap* verification_bm,
                             CMSBitMap* cms_bm,
                             CMSMarkStack* mark_stack);
  bool do_bit(size_t offset);
  void reset(HeapWord* addr);
};


// This closure is used to check that a certain set of bits is
// "empty" (i.e. the bit vector doesn't have any 1-bits).
class FalseBitMapClosure: public BitMapClosure {
 public:
  bool do_bit(size_t offset) {
    guarantee(false, "Should not have a 1 bit");
    return true;
  }
};

// A version of ObjectClosure with "memory" (see _previous_address below)
class UpwardsObjectClosure: public BoolObjectClosure {
  HeapWord* _previous_address;
 public:
  UpwardsObjectClosure() : _previous_address(NULL) { }
  void set_previous(HeapWord* addr) { _previous_address = addr; }
  HeapWord* previous()              { return _previous_address; }
  // A return value of "true" can be used by the caller to decide
  // if this object's end should *NOT* be recorded in
  // _previous_address above.
  virtual bool do_object_bm(oop obj, MemRegion mr) = 0;
};

// This closure is used during the second checkpointing phase
// to rescan the marked objects on the dirty cards in the mod
// union table and the card table proper. It's invoked via
// MarkFromDirtyCardsClosure below. It uses either
// [Par_]MarkRefsIntoAndScanClosure (Par_ in the parallel case)
// declared in genOopClosures.hpp to accomplish some of its work.
// In the parallel case the bitMap is shared, so access to
// it needs to be suitably synchronized for updates by embedded
// closures that update it; however, this closure itself only
// reads the bit_map and because it is idempotent, is immune to
// reading stale values.
class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure {
#ifdef ASSERT
  CMSCollector*   _collector;
  MemRegion       _span;
  union {
    CMSMarkStack* _mark_stack;
    OopTaskQueue* _work_queue;
  };
#endif // ASSERT
  bool            _parallel;
  CMSBitMap*      _bit_map;
  union {
    MarkRefsIntoAndScanClosure*     _scan_closure;
    Par_MarkRefsIntoAndScanClosure* _par_scan_closure;
  };

 public:
  ScanMarkedObjectsAgainClosure(CMSCollector* collector,
                                MemRegion span,
                                ReferenceProcessor* rp,
                                CMSBitMap* bit_map,
                                CMSMarkStack* mark_stack,
                                MarkRefsIntoAndScanClosure* cl):
#ifdef ASSERT
    _collector(collector),
    _span(span),
    _mark_stack(mark_stack),
#endif // ASSERT
    _parallel(false),
    _bit_map(bit_map),
    _scan_closure(cl) { }

  ScanMarkedObjectsAgainClosure(CMSCollector* collector,
                                MemRegion span,
                                ReferenceProcessor* rp,
                                CMSBitMap* bit_map,
                                OopTaskQueue* work_queue,
                                Par_MarkRefsIntoAndScanClosure* cl):
#ifdef ASSERT
    _collector(collector),
    _span(span),
    _work_queue(work_queue),
#endif // ASSERT
    _parallel(true),
    _bit_map(bit_map),
    _par_scan_closure(cl) { }

  bool do_object_b(oop obj) {
    guarantee(false, "Call do_object_b(oop, MemRegion) form instead");
    return false;
  }
  bool do_object_bm(oop p, MemRegion mr);
};

// This closure is used during the second checkpointing phase
// to rescan the marked objects on the dirty cards in the mod
// union table and the card table proper. It invokes
// ScanMarkedObjectsAgainClosure above to accomplish much of its work.
// In the parallel case, the bit map is shared and requires
// synchronized access.
class MarkFromDirtyCardsClosure: public MemRegionClosure {
  CompactibleFreeListSpace*      _space;
  ScanMarkedObjectsAgainClosure  _scan_cl;
  size_t                         _num_dirty_cards;

 public:
  MarkFromDirtyCardsClosure(CMSCollector* collector,
                            MemRegion span,
                            CompactibleFreeListSpace* space,
                            CMSBitMap* bit_map,
                            CMSMarkStack* mark_stack,
                            MarkRefsIntoAndScanClosure* cl):
    _space(space),
    _num_dirty_cards(0),
    _scan_cl(collector, span, collector->ref_processor(), bit_map,
             mark_stack, cl) { }

  MarkFromDirtyCardsClosure(CMSCollector* collector,
                            MemRegion span,
                            CompactibleFreeListSpace* space,
                            CMSBitMap* bit_map,
                            OopTaskQueue* work_queue,
                            Par_MarkRefsIntoAndScanClosure* cl):
    _space(space),
    _num_dirty_cards(0),
    _scan_cl(collector, span, collector->ref_processor(), bit_map,
             work_queue, cl) { }

  void do_MemRegion(MemRegion mr);
  void set_space(CompactibleFreeListSpace* space) { _space = space; }
  size_t num_dirty_cards() { return _num_dirty_cards; }
};

// This closure is used in the non-product build to check
// that there are no MemRegions with a certain property.
class FalseMemRegionClosure: public MemRegionClosure {
  void do_MemRegion(MemRegion mr) {
    guarantee(!mr.is_empty(), "Shouldn't be empty");
    guarantee(false, "Should never be here");
  }
};

// This closure is used during the precleaning phase
// to "carefully" rescan marked objects on dirty cards.
// It uses MarkRefsIntoAndScanClosure declared in genOopClosures.hpp
// to accomplish some of its work.
class ScanMarkedObjectsAgainCarefullyClosure: public ObjectClosureCareful {
  CMSCollector*               _collector;
  MemRegion                   _span;
  bool                        _yield;
  Mutex*                      _freelistLock;
  CMSBitMap*                  _bitMap;
  CMSMarkStack*               _markStack;
  MarkRefsIntoAndScanClosure* _scanningClosure;

 public:
  ScanMarkedObjectsAgainCarefullyClosure(CMSCollector* collector,
                                         MemRegion span,
                                         CMSBitMap* bitMap,
                                         CMSMarkStack* markStack,
                                         MarkRefsIntoAndScanClosure* cl,
                                         bool should_yield):
    _collector(collector),
    _span(span),
    _yield(should_yield),
    _bitMap(bitMap),
    _markStack(markStack),
    _scanningClosure(cl) {
  }

  void do_object(oop p) {
    guarantee(false, "call do_object_careful instead");
  }

  size_t do_object_careful(oop p) {
    guarantee(false, "Unexpected caller");
    return 0;
  }

  size_t do_object_careful_m(oop p, MemRegion mr);

  void setFreelistLock(Mutex* m) {
    _freelistLock = m;
    _scanningClosure->set_freelistLock(m);
  }

 private:
  inline bool do_yield_check();

  void do_yield_work();
};

class SurvivorSpacePrecleanClosure: public ObjectClosureCareful {
  CMSCollector*       _collector;
  MemRegion           _span;
  bool                _yield;
  CMSBitMap*          _bit_map;
  CMSMarkStack*       _mark_stack;
  PushAndMarkClosure* _scanning_closure;
  unsigned int        _before_count;

 public:
  SurvivorSpacePrecleanClosure(CMSCollector* collector,
                               MemRegion span,
                               CMSBitMap* bit_map,
                               CMSMarkStack* mark_stack,
                               PushAndMarkClosure* cl,
                               unsigned int before_count,
                               bool should_yield):
    _collector(collector),
    _span(span),
    _yield(should_yield),
    _bit_map(bit_map),
    _mark_stack(mark_stack),
    _scanning_closure(cl),
    _before_count(before_count)
  { }

  void do_object(oop p) {
    guarantee(false, "call do_object_careful instead");
  }

  size_t do_object_careful(oop p);

  size_t do_object_careful_m(oop p, MemRegion mr) {
    guarantee(false, "Unexpected caller");
    return 0;
  }

 private:
  inline void do_yield_check();
  void do_yield_work();
};

// This closure is used to accomplish the sweeping work
// after the second checkpoint but before the concurrent reset
// phase.
//
// Terminology
//   left hand chunk (LHC) - block of one or more chunks currently being
//     coalesced. The LHC is available for coalescing with a new chunk.
//   right hand chunk (RHC) - block that is currently being swept that is
//     free or garbage that can be coalesced with the LHC.
// _inFreeRange is true if there is currently a LHC
// _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk.
// _freeRangeInFreeLists is true if the LHC is in the free lists.
// _freeFinger is the address of the current LHC
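//
// An illustrative sketch of the per-chunk decision (the names mirror the
// work methods declared in the class below; control flow is simplified):
//
//   if (block is already free)      do_already_free_chunk(fc); // maybe join LHC
//   else if (block is unmarked)     do_garbage_chunk(fc);      // reclaim, maybe join LHC
//   else /* marked, hence live */   do_live_chunk(fc);         // ends any free range
//
// Whenever a live block is reached, the accumulated LHC (if any) is
// returned to the free lists via flush_cur_free_chunk().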
// This closure is used to accomplish the sweeping work
// after the second checkpoint but before the concurrent reset
// phase.
//
// Terminology
//   left hand chunk (LHC) - block of one or more chunks currently being
//     coalesced. The LHC is available for coalescing with a new chunk.
//   right hand chunk (RHC) - block that is currently being swept that is
//     free or garbage that can be coalesced with the LHC.
// _inFreeRange is true if there is currently a LHC
// _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk.
// _freeRangeInFreeLists is true if the LHC is in the free lists.
// _freeFinger is the address of the current LHC
class SweepClosure: public BlkClosureCareful {
  CMSCollector*                  _collector; // collector doing the work
  ConcurrentMarkSweepGeneration* _g;         // Generation being swept
  CompactibleFreeListSpace*      _sp;        // Space being swept
  HeapWord*                      _limit;     // the address at or above which the
                                             // sweep should stop because we do not
                                             // expect newly dead (garbage) blocks
                                             // eligible for sweeping past that
                                             // address.
  Mutex*                         _freelistLock; // Free list lock (in space)
  CMSBitMap*                     _bitMap;    // Marking bit map (in
                                             // generation)
  bool                           _inFreeRange; // Indicates if we are in the
                                             // midst of a free run
  bool                           _freeRangeInFreeLists;
                                             // Often, we have just found
                                             // a free chunk and started
                                             // a new free range; we do not
                                             // eagerly remove this chunk from
                                             // the free lists unless there is
                                             // a possibility of coalescing.
                                             // When true, this flag indicates
                                             // that the _freeFinger below
                                             // points to a potentially free chunk
                                             // that may still be in the free lists
  bool                           _lastFreeRangeCoalesced;
                                             // free range contains chunks
                                             // coalesced
  bool                           _yield;     // Whether sweeping should be
                                             // done with yields. For instance
                                             // when done by the foreground
                                             // collector we shouldn't yield.
  HeapWord*                      _freeFinger; // When _inFreeRange is set, the
                                             // pointer to the "left hand
                                             // chunk"
  size_t                         _freeRangeSize;
                                             // When _inFreeRange is set, this
                                             // indicates the accumulated size
                                             // of the "left hand chunk"
  NOT_PRODUCT(
    size_t                       _numObjectsFreed;
    size_t                       _numWordsFreed;
    size_t                       _numObjectsLive;
    size_t                       _numWordsLive;
    size_t                       _numObjectsAlreadyFree;
    size_t                       _numWordsAlreadyFree;
    FreeChunk*                   _last_fc;
  )
 private:
  // Code that is common to a free chunk or garbage when
  // encountered during sweeping.
  void do_post_free_or_garbage_chunk(FreeChunk* fc, size_t chunkSize);
  // Process a free chunk during sweeping.
  void do_already_free_chunk(FreeChunk* fc);
  // Work method called when processing an already free or a
  // freshly garbage chunk to do a lookahead and possibly a
  // preemptive flush if crossing over _limit.
  void lookahead_and_flush(FreeChunk* fc, size_t chunkSize);
  // Process a garbage chunk during sweeping.
  size_t do_garbage_chunk(FreeChunk* fc);
  // Process a live chunk during sweeping.
  size_t do_live_chunk(FreeChunk* fc);

  // Accessors.
  HeapWord* freeFinger() const            { return _freeFinger; }
  void set_freeFinger(HeapWord* v)        { _freeFinger = v; }
  bool inFreeRange() const                { return _inFreeRange; }
  void set_inFreeRange(bool v)            { _inFreeRange = v; }
  bool lastFreeRangeCoalesced() const     { return _lastFreeRangeCoalesced; }
  void set_lastFreeRangeCoalesced(bool v) { _lastFreeRangeCoalesced = v; }
  bool freeRangeInFreeLists() const       { return _freeRangeInFreeLists; }
  void set_freeRangeInFreeLists(bool v)   { _freeRangeInFreeLists = v; }

  // Initialize a free range.
  void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists);
  // Return this chunk to the free lists.
  void flush_cur_free_chunk(HeapWord* chunk, size_t size);

  // Check if we should yield and do so when necessary.
  inline void do_yield_check(HeapWord* addr);

  // Yield
  void do_yield_work(HeapWord* addr);

  // Debugging/Printing
  void print_free_block_coalesced(FreeChunk* fc) const;

 public:
  SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
               CMSBitMap* bitMap, bool should_yield);
  ~SweepClosure() PRODUCT_RETURN;

  size_t do_blk_careful(HeapWord* addr);
  void print() const { print_on(tty); }
  void print_on(outputStream* st) const;
};
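// To make the LHC/RHC terminology above concrete, a heavily simplified
// sketch of the decision the sweep makes for each free or garbage chunk
// it encounters (the real logic lives in do_post_free_or_garbage_chunk()
// and friends; `fc` is a hypothetical local):
//
//   if (!inFreeRange()) {
//     // No LHC yet: this chunk opens a new free range.
//     set_inFreeRange(true);
//     set_freeFinger((HeapWord*)fc);
//   } else {
//     // An LHC already ends where this chunk (the RHC) begins, so the
//     // LHC simply grows and is marked as coalesced.
//     set_lastFreeRangeCoalesced(true);
//   }
//   // When a live object (or _limit) later terminates the range, the
//   // accumulated LHC is returned to the free lists in one piece:
//   //   flush_cur_free_chunk(freeFinger(), _freeRangeSize);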
// Closures related to weak reference processing

// During CMS' weak reference processing, this is a
// work-routine/closure used to complete transitive
// marking of objects as live after an initial set of
// live objects has been completely accumulated.
// This closure is currently used both during the final
// remark stop-world phase, as well as during the concurrent
// precleaning of the discovered reference lists.
class CMSDrainMarkingStackClosure: public VoidClosure {
  CMSCollector*        _collector;
  MemRegion            _span;
  CMSMarkStack*        _mark_stack;
  CMSBitMap*           _bit_map;
  CMSKeepAliveClosure* _keep_alive;
  bool                 _concurrent_precleaning;
 public:
  CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span,
                              CMSBitMap* bit_map, CMSMarkStack* mark_stack,
                              CMSKeepAliveClosure* keep_alive,
                              bool cpc):
    _collector(collector),
    _span(span),
    _bit_map(bit_map),
    _mark_stack(mark_stack),
    _keep_alive(keep_alive),
    _concurrent_precleaning(cpc) {
    assert(_concurrent_precleaning == _keep_alive->concurrent_precleaning(),
           "Mismatch");
  }

  void do_void();
};

// A parallel version of CMSDrainMarkingStackClosure above.
class CMSParDrainMarkingStackClosure: public VoidClosure {
  CMSCollector*                 _collector;
  MemRegion                     _span;
  OopTaskQueue*                 _work_queue;
  CMSBitMap*                    _bit_map;
  CMSInnerParMarkAndPushClosure _mark_and_push;

 public:
  CMSParDrainMarkingStackClosure(CMSCollector* collector,
                                 MemRegion span, CMSBitMap* bit_map,
                                 OopTaskQueue* work_queue):
    _collector(collector),
    _span(span),
    _bit_map(bit_map),
    _work_queue(work_queue),
    _mark_and_push(collector, span, bit_map, work_queue) { }

  void trim_queue(uint max);
  void do_void();
};

// Allow yielding or short-circuiting of reference list
// precleaning work.
class CMSPrecleanRefsYieldClosure: public YieldClosure {
  CMSCollector* _collector;
  void do_yield_work();
 public:
  CMSPrecleanRefsYieldClosure(CMSCollector* collector):
    _collector(collector) {}
  virtual bool should_return();
};

// Convenience class that locks free list locks for given CMS collector
class FreelistLocker: public StackObj {
 private:
  CMSCollector* _collector;
 public:
  FreelistLocker(CMSCollector* collector):
    _collector(collector) {
    _collector->getFreelistLocks();
  }

  ~FreelistLocker() {
    _collector->releaseFreelistLocks();
  }
};
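// A minimal usage sketch for FreelistLocker: being a StackObj it follows
// the usual RAII pattern, so the free list locks are held exactly for the
// scope of the local (`collector` is a hypothetical local):
//
//   {
//     FreelistLocker fll(collector);  // constructor takes the locks
//     // ... work that requires the free lists to be stable ...
//   }                                 // destructor releases the locks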
// Mark all dead objects in a given space.
class MarkDeadObjectsClosure: public BlkClosure {
  const CMSCollector*             _collector;
  const CompactibleFreeListSpace* _sp;
  CMSBitMap*                      _live_bit_map;
  CMSBitMap*                      _dead_bit_map;
 public:
  MarkDeadObjectsClosure(const CMSCollector* collector,
                         const CompactibleFreeListSpace* sp,
                         CMSBitMap* live_bit_map,
                         CMSBitMap* dead_bit_map) :
    _collector(collector),
    _sp(sp),
    _live_bit_map(live_bit_map),
    _dead_bit_map(dead_bit_map) {}
  size_t do_blk(HeapWord* addr);
};

class TraceCMSMemoryManagerStats : public TraceMemoryManagerStats {
 public:
  TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause);
};

#endif // SHARE_VM_GC_CMS_CONCURRENTMARKSWEEPGENERATION_HPP