/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP

#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gSpaceCounters.hpp"
#include "gc_implementation/shared/gcStats.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "gc_implementation/shared/generationCounters.hpp"
#include "memory/freeBlockDictionary.hpp"
#include "memory/generation.hpp"
#include "memory/iterator.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memoryService.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/taskqueue.hpp"
#include "utilities/yieldingWorkgroup.hpp"

// ConcurrentMarkSweepGeneration is in support of a concurrent
// mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Shenker
// style. We assume, for now, that this generation is always the
// seniormost generation and, for simplicity in this first implementation,
// that it is a single compactible space. Neither of these restrictions
// appears essential; both will be relaxed in the future when more time is
// available to implement the greater generality (and there's a need for it).
//
// Concurrent mode failures are currently handled by
// means of a sliding mark-compact.

class AdaptiveSizePolicy;
class CMSConcMarkingTask;
class CMSGCAdaptivePolicyCounters;
class CMSTracer;
class ConcurrentGCTimer;
class ConcurrentMarkSweepGeneration;
class ConcurrentMarkSweepPolicy;
class ConcurrentMarkSweepThread;
class CompactibleFreeListSpace;
class FreeChunk;
class PromotionInfo;
class ScanMarkedObjectsAgainCarefullyClosure;
class TenuredGeneration;
class SerialOldTracer;

// A generic CMS bit map. It's the basis for both the CMS marking bit map
// as well as for the mod union table (in each case only a subset of the
// methods are used). This is essentially a wrapper around the BitMap class,
// with one bit per (1<<_shifter) HeapWords. (i.e. for the marking bit map,
// we have _shifter == 0, and for the mod union table we have
// _shifter == CardTableModRefBS::card_shift - LogHeapWordSize.)
// XXX 64-bit issues in BitMap?
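// Illustrative sketch of the conversion arithmetic (names mirror the fields
// declared below; the mod union figures assume a 64-bit VM):
//   bit  = pointer_delta(addr, _bmStartWord) >> _shifter;  // heapWordToOffset()
//   addr = _bmStartWord + (bit << _shifter);               // offsetToHeapWord()
// For the marking bit map (_shifter == 0) each bit covers one HeapWord; for
// the mod union table (card_shift == 9, LogHeapWordSize == 3) _shifter == 6,
// so one bit summarizes a 64-HeapWord (512-byte) card.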
class CMSBitMap VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;

  HeapWord*    _bmStartWord;   // base address of range covered by map
  size_t       _bmWordSize;    // map size (in #HeapWords covered)
  const int    _shifter;       // shifts to convert HeapWord to bit position
  VirtualSpace _virtual_space; // underlying the bit map
  BitMap       _bm;            // the bit map itself
 public:
  Mutex* const _lock;          // mutex protecting _bm

 public:
  // constructor
  CMSBitMap(int shifter, int mutex_rank, const char* mutex_name);

  // allocates the actual storage for the map
  bool allocate(MemRegion mr);
  // field getter
  Mutex* lock() const { return _lock; }
  // locking verifier convenience function
  void assert_locked() const PRODUCT_RETURN;

  // inquiries
  HeapWord* startWord()   const { return _bmStartWord; }
  size_t    sizeInWords() const { return _bmWordSize; }
  size_t    sizeInBits()  const { return _bm.size(); }
  // the following is one past the last word in space
  HeapWord* endWord()     const { return _bmStartWord + _bmWordSize; }

  // reading marks
  bool isMarked(HeapWord* addr) const;
  bool par_isMarked(HeapWord* addr) const; // does not do lock checks
  bool isUnmarked(HeapWord* addr) const;
  bool isAllClear() const;

  // writing marks
  void mark(HeapWord* addr);
  // For marking by parallel GC threads;
  // returns true if we did, false if another thread did
  bool par_mark(HeapWord* addr);

  void mark_range(MemRegion mr);
  void par_mark_range(MemRegion mr);
  void mark_large_range(MemRegion mr);
  void par_mark_large_range(MemRegion mr);
  void par_clear(HeapWord* addr); // For unmarking by parallel GC threads.
  void clear_range(MemRegion mr);
  void par_clear_range(MemRegion mr);
  void clear_large_range(MemRegion mr);
  void par_clear_large_range(MemRegion mr);
  void clear_all();
  void clear_all_incrementally();  // Not yet implemented!!

  NOT_PRODUCT(
    // checks the memory region for validity
    void region_invariant(MemRegion mr);
  )

  // iteration
  void iterate(BitMapClosure* cl) {
    _bm.iterate(cl);
  }
  void iterate(BitMapClosure* cl, HeapWord* left, HeapWord* right);
  void dirty_range_iterate_clear(MemRegionClosure* cl);
  void dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl);

  // auxiliary support for iteration
  HeapWord* getNextMarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextMarkedWordAddress(HeapWord* start_addr,
                                     HeapWord* end_addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* start_addr,
                                       HeapWord* end_addr) const;
  MemRegion getAndClearMarkedRegion(HeapWord* addr);
  MemRegion getAndClearMarkedRegion(HeapWord* start_addr,
                                    HeapWord* end_addr);

  // conversion utilities
  HeapWord* offsetToHeapWord(size_t offset) const;
  size_t    heapWordToOffset(HeapWord* addr) const;
  size_t    heapWordDiffToOffsetDiff(size_t diff) const;

  void print_on_error(outputStream* st, const char* prefix) const;

  // debugging
  // is this address range covered by the bit-map?
  NOT_PRODUCT(
    bool covers(MemRegion mr) const;
    bool covers(HeapWord* start, size_t size = 0) const;
  )
  void verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) PRODUCT_RETURN;
};

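// Illustrative sketch (assumed usage, not a prescribed protocol):
// single-threaded phases update the map under its mutex, while parallel
// GC threads use the CAS-based par_* entry points without taking it.
//
//   CMSBitMap bm(0, Mutex::leaf, "markBitMap");  // hypothetical instance
//   bm.allocate(mr);                             // back it with storage
//   {
//     MutexLockerEx ml(bm.lock(), Mutex::_no_safepoint_check_flag);
//     bm.mark(addr);                 // serial marking, lock held
//   }
//   bool first = bm.par_mark(addr);  // parallel: true iff this thread set the bit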
// Represents a marking stack used by the CMS collector.
// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
class CMSMarkStack: public CHeapObj<mtGC>  {
  //
  friend class CMSCollector;   // To get at expansion stats further below.
  //

  VirtualSpace _virtual_space;  // Space for the stack
  oop*   _base;      // Bottom of stack
  size_t _index;     // One more than last occupied index
  size_t _capacity;  // Max #elements
  Mutex  _par_lock;  // An advisory lock used in case of parallel access
  NOT_PRODUCT(size_t _max_depth;)  // Max depth plumbed during run

 protected:
  size_t _hit_limit;      // We hit max stack size limit
  size_t _failed_double;  // We failed expansion before hitting limit

 public:
  CMSMarkStack():
    _par_lock(Mutex::event, "CMSMarkStack._par_lock", true),
    _hit_limit(0),
    _failed_double(0) {}

  bool allocate(size_t size);

  size_t capacity() const { return _capacity; }

  oop pop() {
    if (!isEmpty()) {
      return _base[--_index];
    }
    return NULL;
  }

  bool push(oop ptr) {
    if (isFull()) {
      return false;
    } else {
      _base[_index++] = ptr;
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
      return true;
    }
  }

  bool isEmpty() const { return _index == 0; }
  bool isFull()  const {
    assert(_index <= _capacity, "buffer overflow");
    return _index == _capacity;
  }

  size_t length() { return _index; }

  // "Parallel versions" of some of the above
  oop par_pop() {
    // lock and pop
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return pop();
  }

  bool par_push(oop ptr) {
    // lock and push
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return push(ptr);
  }

  // Forcibly reset the stack, losing all of its contents.
  void reset() {
    _index = 0;
  }

  // Expand the stack, typically in response to an overflow condition.
  void expand();

  // Compute the least valued stack element.
  oop least_value(HeapWord* low) {
    oop least = (oop)low;
    for (size_t i = 0; i < _index; i++) {
      least = MIN2(least, _base[i]);
    }
    return least;
  }

  // Exposed here to allow stack expansion in || case.
  Mutex* par_lock() { return &_par_lock; }
};

class CardTableRS;
class CMSParGCThreadState;

class ModUnionClosure: public MemRegionClosure {
 protected:
  CMSBitMap* _t;
 public:
  ModUnionClosure(CMSBitMap* t): _t(t) { }
  void do_MemRegion(MemRegion mr);
};

class ModUnionClosurePar: public ModUnionClosure {
 public:
  ModUnionClosurePar(CMSBitMap* t): ModUnionClosure(t) { }
  void do_MemRegion(MemRegion mr);
};

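// Illustrative sketch (assumed usage, not code from this file) of the
// CMSMarkStack above: a marking loop drains the stack and falls back to an
// overflow protocol when a push fails because the stack is full:
//
//   CMSMarkStack stk;
//   if (!stk.allocate(MarkStackSize)) return;  // MarkStackSize: VM option
//   while (!stk.isEmpty()) {
//     oop obj = stk.pop();
//     // ... scan obj; for each newly marked reference q:
//     if (!stk.push(q)) {
//       stk.expand();  // try to grow; on repeated failure the collector
//                      // diverts q to its overflow list (see CMSCollector)
//     }
//   }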
// Survivor Chunk Array in support of parallelization of
// Survivor Space rescan.
class ChunkArray: public CHeapObj<mtGC> {
  size_t _index;
  size_t _capacity;
  size_t _overflows;
  HeapWord** _array;   // storage for array

 public:
  ChunkArray() : _index(0), _capacity(0), _overflows(0), _array(NULL) {}
  ChunkArray(HeapWord** a, size_t c):
    _index(0), _capacity(c), _overflows(0), _array(a) {}

  HeapWord** array() { return _array; }
  void set_array(HeapWord** a) { _array = a; }

  size_t capacity() { return _capacity; }
  void set_capacity(size_t c) { _capacity = c; }

  size_t end() {
    assert(_index <= capacity(),
           err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT "): out of bounds",
                   _index, _capacity));
    return _index;
  }  // exclusive

  HeapWord* nth(size_t n) {
    assert(n < end(), "Out of bounds access");
    return _array[n];
  }

  void reset() {
    _index = 0;
    if (_overflows > 0 && PrintCMSStatistics > 1) {
      warning("CMS: ChunkArray[" SIZE_FORMAT "] overflowed " SIZE_FORMAT " times",
              _capacity, _overflows);
    }
    _overflows = 0;
  }

  void record_sample(HeapWord* p, size_t sz) {
    // For now we do not do anything with the size
    if (_index < _capacity) {
      _array[_index++] = p;
    } else {
      ++_overflows;
      assert(_index == _capacity,
             err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT
                     "): out of bounds at overflow#" SIZE_FORMAT,
                     _index, _capacity, _overflows));
    }
  }
};

//
// Timing, allocation and promotion statistics for gc scheduling and incremental
// mode pacing. Most statistics are exponential averages.
//
class CMSStats VALUE_OBJ_CLASS_SPEC {
 private:
  ConcurrentMarkSweepGeneration* const _cms_gen;   // The cms (old) gen.

  // The following are exponential averages with factor alpha:
  //   avg = (100 - alpha) * avg + alpha * cur_sample
  //
  // The durations measure:  end_time[n] - start_time[n]
  // The periods measure:    start_time[n] - start_time[n-1]
  //
  // The cms period and duration include only concurrent collections; time spent
  // in foreground cms collections due to System.gc() or because of a failure to
  // keep up are not included.
  //
  // There are 3 alphas to "bootstrap" the statistics. The _saved_alpha is the
  // real value, but is used only after the first period. A value of 100 is
  // used for the first sample so it gets the entire weight.
  unsigned int _saved_alpha; // 0-100
  unsigned int _gc0_alpha;
  unsigned int _cms_alpha;

  double _gc0_duration;
  double _gc0_period;
  size_t _gc0_promoted;           // bytes promoted per gc0
  double _cms_duration;
  double _cms_duration_pre_sweep; // time from initiation to start of sweep
  double _cms_duration_per_mb;
  double _cms_period;
  size_t _cms_allocated;          // bytes of direct allocation per gc0 period

  // Timers.
  elapsedTimer _cms_timer;
  TimeStamp    _gc0_begin_time;
  TimeStamp    _cms_begin_time;
  TimeStamp    _cms_end_time;

  // Snapshots of the amount used in the CMS generation.
  size_t _cms_used_at_gc0_begin;
  size_t _cms_used_at_gc0_end;
  size_t _cms_used_at_cms_begin;

  // Used to prevent the duty cycle from being reduced in the middle of a cms
  // cycle.
  bool _allow_duty_cycle_reduction;

  enum {
    _GC0_VALID = 0x1,
    _CMS_VALID = 0x2,
    _ALL_VALID = _GC0_VALID | _CMS_VALID
  };

  unsigned int _valid_bits;

 protected:
  // In support of adjusting of cms trigger ratios based on history
  // of concurrent mode failure.
  double cms_free_adjustment_factor(size_t free) const;
  void   adjust_cms_free_adjustment_factor(bool fail, size_t free);

 public:
  CMSStats(ConcurrentMarkSweepGeneration* cms_gen,
           unsigned int alpha = CMSExpAvgFactor);

  // Whether or not the statistics contain valid data; higher level statistics
  // cannot be called until this returns true (they require at least one young
  // gen and one cms cycle to have completed).
  bool valid() const;

  // Record statistics.
  void record_gc0_begin();
  void record_gc0_end(size_t cms_gen_bytes_used);
  void record_cms_begin();
  void record_cms_end();

  // Allow management of the cms timer, which must be stopped/started around
  // yield points.
  elapsedTimer& cms_timer()     { return _cms_timer; }
  void start_cms_timer()        { _cms_timer.start(); }
  void stop_cms_timer()         { _cms_timer.stop(); }

  // Basic statistics; units are seconds or bytes.
  double gc0_period() const     { return _gc0_period; }
  double gc0_duration() const   { return _gc0_duration; }
  size_t gc0_promoted() const   { return _gc0_promoted; }
  double cms_period() const     { return _cms_period; }
  double cms_duration() const   { return _cms_duration; }
  double cms_duration_per_mb() const { return _cms_duration_per_mb; }
  size_t cms_allocated() const  { return _cms_allocated; }

  size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end;}

  // Seconds since the last background cms cycle began or ended.
  double cms_time_since_begin() const;
  double cms_time_since_end() const;

  // Higher level statistics--caller must check that valid() returns true before
  // calling.

  // Returns bytes promoted per second of wall clock time.
  double promotion_rate() const;

  // Returns bytes directly allocated per second of wall clock time.
  double cms_allocation_rate() const;

  // Rate at which space in the cms generation is being consumed (sum of the
  // above two).
  double cms_consumption_rate() const;

  // Returns an estimate of the number of seconds until the cms generation will
  // fill up, assuming no collection work is done.
  double time_until_cms_gen_full() const;

  // Returns an estimate of the number of seconds remaining until
  // the cms generation collection should start.
  double time_until_cms_start() const;

  // End of higher level statistics.

  // Debugging.
  void print_on(outputStream* st) const PRODUCT_RETURN;
  void print() const { print_on(gclog_or_tty); }
};

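// Worked example (illustrative; assumes the percentage weights above are
// normalized by 100): with alpha == 50, a current average gc0 period of
// 4.0s and a new sample of 2.0s,
//   avg' = ((100 - 50) * 4.0 + 50 * 2.0) / 100 = 3.0s
// Scheduling then combines such averages, conceptually:
//   time_until_cms_gen_full ~= cms_free_bytes / cms_consumption_rate()
//   time_until_cms_start    ~= time_until_cms_gen_full - expected cms duration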
// A closure related to weak references processing which
// we embed in the CMSCollector, since we need to pass
// it to the reference processor for secondary filtering
// of references based on reachability of referent;
// see role of _is_alive_non_header closure in the
// ReferenceProcessor class.
// For objects in the CMS generation, this closure checks
// if the object is "live" (reachable). Used in weak
// reference processing.
class CMSIsAliveClosure: public BoolObjectClosure {
  const MemRegion  _span;
  const CMSBitMap* _bit_map;

  friend class CMSCollector;
 public:
  CMSIsAliveClosure(MemRegion span,
                    CMSBitMap* bit_map):
    _span(span),
    _bit_map(bit_map) {
    assert(!span.is_empty(), "Empty span could spell trouble");
  }

  bool do_object_b(oop obj);
};


// Implements AbstractRefProcTaskExecutor for CMS.
class CMSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
 public:

  CMSRefProcTaskExecutor(CMSCollector& collector)
    : _collector(collector)
  { }

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
 private:
  CMSCollector& _collector;
};


class CMSCollector: public CHeapObj<mtGC> {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweepGeneration;
  friend class CompactibleFreeListSpace;
  friend class CMSParMarkTask;
  friend class CMSParInitialMarkTask;
  friend class CMSParRemarkTask;
  friend class CMSConcMarkingTask;
  friend class CMSRefProcTaskProxy;
  friend class CMSRefProcTaskExecutor;
  friend class ScanMarkedObjectsAgainCarefullyClosure;  // for sampling eden
  friend class SurvivorSpacePrecleanClosure;            // --- ditto -------
  friend class PushOrMarkClosure;                       // to access _restart_addr
  friend class Par_PushOrMarkClosure;                   // to access _restart_addr
  friend class MarkFromRootsClosure;                    //  -- ditto --
                                                        // ... and for clearing cards
  friend class Par_MarkFromRootsClosure;                // to access _restart_addr
                                                        // ... and for clearing cards
  friend class Par_ConcMarkingClosure;                  // to access _restart_addr etc.
  friend class MarkFromRootsVerifyClosure;              // to access _restart_addr
  friend class PushAndMarkVerifyClosure;                //  -- ditto --
  friend class MarkRefsIntoAndScanClosure;              // to access _overflow_list
  friend class PushAndMarkClosure;                      //  -- ditto --
  friend class Par_PushAndMarkClosure;                  //  -- ditto --
  friend class CMSKeepAliveClosure;                     //  -- ditto --
  friend class CMSDrainMarkingStackClosure;             //  -- ditto --
  friend class CMSInnerParMarkAndPushClosure;           //  -- ditto --
  NOT_PRODUCT(friend class ScanMarkedObjectsAgainClosure;) // assertion on _overflow_list
  friend class ReleaseForegroundGC;  // to access _foregroundGCShouldWait
  friend class VM_CMS_Operation;
  friend class VM_CMS_Initial_Mark;
  friend class VM_CMS_Final_Remark;
  friend class TraceCMSMemoryManagerStats;

 private:
  jlong _time_of_last_gc;
  void update_time_of_last_gc(jlong now) {
    _time_of_last_gc = now;
  }

  OopTaskQueueSet* _task_queues;

  // Overflow list of grey objects, threaded through mark-word
  // Manipulated with CAS in the parallel/multi-threaded case.
  oop _overflow_list;
  // The following array-pair keeps track of mark words
  // displaced for accommodating overflow list above.
  // This code will likely be revisited under RFE#4922830.
  Stack<oop, mtGC>     _preserved_oop_stack;
  Stack<markOop, mtGC> _preserved_mark_stack;

  int* _hash_seed;

  // In support of multi-threaded concurrent phases
  YieldingFlexibleWorkGang* _conc_workers;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Initialization Errors
  bool _completed_initialization;

  // In support of ExplicitGCInvokesConcurrent
  static bool _full_gc_requested;
  static GCCause::Cause _full_gc_cause;
  unsigned int _collection_count_start;

  // Should we unload classes this concurrent cycle?
  bool _should_unload_classes;
  unsigned int _concurrent_cycles_since_last_unload;
  unsigned int concurrent_cycles_since_last_unload() const {
    return _concurrent_cycles_since_last_unload;
  }
  // Did we (allow) unload classes in the previous concurrent cycle?
  bool unloaded_classes_last_cycle() const {
    return concurrent_cycles_since_last_unload() == 0;
  }
  // Root scanning options for perm gen
  int _roots_scanning_options;
  int roots_scanning_options() const      { return _roots_scanning_options; }
  void add_root_scanning_option(int o)    { _roots_scanning_options |= o;   }
  void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o;  }

  // Verification support
  CMSBitMap _verification_mark_bm;
  void verify_after_remark_work_1();
  void verify_after_remark_work_2();

  // True if any verification flag is on.
  bool _verifying;
  bool verifying() const     { return _verifying; }
  void set_verifying(bool v) { _verifying = v; }

  // Collector policy
  ConcurrentMarkSweepPolicy* _collector_policy;
  ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }

  void set_did_compact(bool v);

  // XXX Move these to CMSStats ??? FIX ME !!!
  elapsedTimer _inter_sweep_timer;   // Time between sweeps
  elapsedTimer _intra_sweep_timer;   // Time _in_ sweeps
  // Padded decaying average estimates of the above
  AdaptivePaddedAverage _inter_sweep_estimate;
  AdaptivePaddedAverage _intra_sweep_estimate;

  CMSTracer* _gc_tracer_cm;
  ConcurrentGCTimer* _gc_timer_cm;

  bool _cms_start_registered;

  GCHeapSummary _last_heap_summary;
  MetaspaceSummary _last_metaspace_summary;

  void register_foreground_gc_start(GCCause::Cause cause);
  void register_gc_start(GCCause::Cause cause);
  void register_gc_end();
  void save_heap_summary();
  void report_heap_summary(GCWhen::Type when);

 protected:
  ConcurrentMarkSweepGeneration* _cmsGen; // Old gen (CMS)
  MemRegion                      _span;   // Span covering above two
  CardTableRS*                   _ct;     // Card table

  // CMS marking support structures
  CMSBitMap    _markBitMap;
  CMSBitMap    _modUnionTable;
  CMSMarkStack _markStack;

  HeapWord*    _restart_addr; // In support of marking stack overflow
  void         lower_restart_addr(HeapWord* low);

  // Counters in support of marking stack / work queue overflow handling:
  // a non-zero value indicates certain types of overflow events during
  // the current CMS cycle and could lead to stack resizing efforts at
  // an opportune future time.
  size_t _ser_pmc_preclean_ovflw;
  size_t _ser_pmc_remark_ovflw;
  size_t _par_pmc_remark_ovflw;
  size_t _ser_kac_preclean_ovflw;
  size_t _ser_kac_ovflw;
  size_t _par_kac_ovflw;
  NOT_PRODUCT(ssize_t _num_par_pushes;)

  // ("Weak") Reference processing support.
  ReferenceProcessor* _ref_processor;
  CMSIsAliveClosure   _is_alive_closure;
  // Keep this textually after _markBitMap and _span; c'tor dependency.

  ConcurrentMarkSweepThread* _cmsThread; // The thread doing the work
  ModUnionClosure    _modUnionClosure;
  ModUnionClosurePar _modUnionClosurePar;

  // CMS abstract state machine
  // initial_state: Idling
  // next_state(Idling)            = {Marking}
  // next_state(Marking)           = {Precleaning, Sweeping}
  // next_state(Precleaning)       = {AbortablePreclean, FinalMarking}
  // next_state(AbortablePreclean) = {FinalMarking}
  // next_state(FinalMarking)      = {Sweeping}
  // next_state(Sweeping)          = {Resizing}
  // next_state(Resizing)          = {Resetting}
  // next_state(Resetting)         = {Idling}
  // The numeric values below are chosen so that:
  // . _collectorState <= Idling ==  post-sweep && pre-mark
  // . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
  //                                            precleaning || abortablePreclean
 public:
  enum CollectorState {
    Resizing          = 0,
    Resetting         = 1,
    Idling            = 2,
    InitialMarking    = 3,
    Marking           = 4,
    Precleaning       = 5,
    AbortablePreclean = 6,
    FinalMarking      = 7,
    Sweeping          = 8
  };
 protected:
  static CollectorState _collectorState;

  // State related to prologue/epilogue invocation for my generations
  bool _between_prologue_and_epilogue;

  // Signaling/State related to coordination between fore- and background GC
  // Note: When the baton has been passed from background GC to foreground GC,
  // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
  static bool _foregroundGCIsActive;   // true iff foreground collector is active or
                                       // wants to go active
  static bool _foregroundGCShouldWait; // true iff background GC is active and has not
                                       // yet passed the baton to the foreground GC

  // Support for CMSScheduleRemark (abortable preclean)
  bool _abort_preclean;
  bool _start_sampling;

  int    _numYields;
  size_t _numDirtyCards;
  size_t _sweep_count;
  // Number of full gc's since the last concurrent gc.
  uint   _full_gcs_since_conc_gc;

  // Occupancy used for bootstrapping stats
  double _bootstrap_occupancy;

  // Timer
  elapsedTimer _timer;

  // Timing, allocation and promotion statistics, used for scheduling.
  CMSStats _stats;

  enum CMS_op_type {
    CMS_op_checkpointRootsInitial,
    CMS_op_checkpointRootsFinal
  };

  void do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause);
  bool stop_world_and_do(CMS_op_type op);

  OopTaskQueueSet* task_queues()  { return _task_queues; }
  int*             hash_seed(int i) { return &_hash_seed[i]; }
  YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; }

  // Support for parallelizing Eden rescan in CMS remark phase
  void sample_eden(); // ... sample Eden space top

 private:
  // Support for parallelizing young gen rescan in CMS remark phase
  Generation* _young_gen;          // the younger gen
  HeapWord**  _top_addr;           // ... Top of Eden
  HeapWord**  _end_addr;           // ... End of Eden
  Mutex*      _eden_chunk_lock;
  HeapWord**  _eden_chunk_array;   // ... Eden partitioning array
  size_t      _eden_chunk_index;   // ... top (exclusive) of array
  size_t      _eden_chunk_capacity; // ... max entries in array
  // Support for parallelizing survivor space rescan
  HeapWord** _survivor_chunk_array;
  size_t     _survivor_chunk_index;
  size_t     _survivor_chunk_capacity;
  size_t*    _cursor;
  ChunkArray* _survivor_plab_array;

  // Support for marking stack overflow handling
  bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
  bool par_take_from_overflow_list(size_t num,
                                   OopTaskQueue* to_work_q,
                                   int no_of_gc_threads);
  void push_on_overflow_list(oop p);
  void par_push_on_overflow_list(oop p);
  // The following is, obviously, not, in general, "MT-stable"
  bool overflow_list_is_empty() const;

  void preserve_mark_if_necessary(oop p);
  void par_preserve_mark_if_necessary(oop p);
  void preserve_mark_work(oop p, markOop m);
  void restore_preserved_marks_if_any();
  NOT_PRODUCT(bool no_preserved_marks() const;)
  // In support of testing overflow code
  NOT_PRODUCT(int _overflow_counter;)
  NOT_PRODUCT(bool simulate_overflow();)     // Sequential
  NOT_PRODUCT(bool par_simulate_overflow();) // MT version

  // CMS work methods
  void checkpointRootsInitialWork(bool asynch); // Initial checkpoint work

  // A return value of false indicates failure due to stack overflow
  bool markFromRootsWork(bool asynch); // Concurrent marking work

 public:   // FIX ME!!! only for testing
  bool do_marking_st(bool asynch); // Single-threaded marking
  bool do_marking_mt(bool asynch); // Multi-threaded  marking

 private:

  // Concurrent precleaning work
  size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
                                  ScanMarkedObjectsAgainCarefullyClosure* cl);
  size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
                             ScanMarkedObjectsAgainCarefullyClosure* cl);
  // Does precleaning work, returning a quantity indicative of
  // the amount of "useful work" done.
  size_t preclean_work(bool clean_refs, bool clean_survivors);
  void preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock);
  void abortable_preclean(); // Preclean while looking for possible abort
  void initialize_sequential_subtasks_for_young_gen_rescan(int i);
  // Helper function for above; merge-sorts the per-thread plab samples
  void merge_survivor_plab_arrays(ContiguousSpace* surv, int no_of_gc_threads);
  // Resets (i.e. clears) the per-thread plab sample vectors
  void reset_survivor_plab_arrays();

  // Final (second) checkpoint work
  void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
                                bool init_mark_was_synchronous);
  // Work routine for parallel version of remark
  void do_remark_parallel();
  // Work routine for non-parallel version of remark
  void do_remark_non_parallel();
  // Reference processing work routine (during second checkpoint)
  void refProcessingWork(bool asynch, bool clear_all_soft_refs);

  // Concurrent sweeping work
  void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);

  // (Concurrent) resetting of support data structures
  void reset(bool asynch);

  // Clear _expansion_cause fields of constituent generations
  void clear_expansion_cause();

  // An auxiliary method used to record the ends of
  // used regions of each generation to limit the extent of sweep
  void save_sweep_limits();

  // A work method used by foreground collection to determine
  // what type of collection (compacting or not, continuing or fresh)
  // it should do.
  void decide_foreground_collection_type(bool  clear_all_soft_refs,
                                         bool* should_compact, bool* should_start_over);

  // A work method used by the foreground collector to do
  // a mark-sweep-compact.
  void do_compaction_work(bool clear_all_soft_refs);

  // A work method used by the foreground collector to do
  // a mark-sweep, after taking over from a possibly on-going
  // concurrent mark-sweep collection.
  void do_mark_sweep_work(bool clear_all_soft_refs,
                          CollectorState first_state, bool should_start_over);

  // Work methods for reporting concurrent mode interruption or failure
  bool is_external_interruption();
  void report_concurrent_mode_interruption();

  // If the background GC is active, acquire control from the background
  // GC and do the collection.
  void acquire_control_and_collect(bool full, bool clear_all_soft_refs);

  // For synchronizing passing of control from background to foreground
  // GC. waitForForegroundGC() is called by the background
  // collector. If it had to wait for a foreground collection,
  // it returns true and the background collection should assume
  // that the collection was finished by the foreground
  // collector.
  bool waitForForegroundGC();

  size_t block_size_using_printezis_bits(HeapWord* addr) const;
  size_t block_size_if_printezis_bits(HeapWord* addr) const;
  HeapWord* next_card_start_after_block(HeapWord* addr) const;

  void setup_cms_unloading_and_verification_state();
 public:
  CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
               CardTableRS*                   ct,
               ConcurrentMarkSweepPolicy*     cp);
  ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }

  ReferenceProcessor* ref_processor() { return _ref_processor; }
  void ref_processor_init();

  Mutex* bitMapLock() const { return _markBitMap.lock(); }
  static CollectorState abstract_state() { return _collectorState; }

  bool should_abort_preclean() const; // Whether preclean should be aborted.
  size_t get_eden_used() const;
  size_t get_eden_capacity() const;

  ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }

  // Locking checks
  NOT_PRODUCT(static bool have_cms_token();)

  // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
  bool shouldConcurrentCollect();

  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);
  void collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause);
  void collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause);

  // In support of ExplicitGCInvokesConcurrent
  static void request_full_gc(unsigned int full_gc_count, GCCause::Cause cause);
  // Should we unload classes in a particular concurrent cycle?
  bool should_unload_classes() const {
    return _should_unload_classes;
  }
  void update_should_unload_classes();

  void direct_allocated(HeapWord* start, size_t size);

  // Object is dead if not marked and current phase is sweeping.
  bool is_dead_obj(oop obj) const;

  // After a promotion (of "start"), do any necessary marking.
  // If "par", then it's being done by a parallel GC thread.
  // The last two args indicate if we need precise marking
  // and if so the size of the object so it can be dirtied
  // in its entirety.
  void promoted(bool par, HeapWord* start,
                bool is_obj_array, size_t obj_size);

  void getFreelistLocks() const;
  void releaseFreelistLocks() const;
  bool haveFreelistLocks() const;

  // Adjust size of underlying generation
  void compute_new_size();

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_epilogue(bool full);

  jlong time_of_last_gc(jlong now) {
    if (_collectorState <= Idling) {
      // gc not in progress
      return _time_of_last_gc;
    } else {
      // collection in progress
      return now;
    }
  }

  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num);
  void sample_eden_chunk();

  CMSBitMap* markBitMap() { return &_markBitMap; }
  void directAllocated(HeapWord* start, size_t size);

  // Main CMS steps and related support
  void checkpointRootsInitial(bool asynch);
  bool markFromRoots(bool asynch); // a return value of false indicates failure
                                   // due to stack overflow
  void preclean();
  void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs,
                            bool init_mark_was_synchronous);
  void sweep(bool asynch);

  // Check that the currently executing thread is the expected
  // one (foreground collector or background collector).
  static void check_correct_thread_executing() PRODUCT_RETURN;
  // XXXPERM void print_statistics() PRODUCT_RETURN;

  bool is_cms_reachable(HeapWord* addr);

  // Performance Counter Support
  CollectorCounters* counters() { return _gc_counters; }

  // Timer stuff
  void   startTimer() { assert(!_timer.is_active(), "Error"); _timer.start(); }
  void   stopTimer()  { assert( _timer.is_active(), "Error"); _timer.stop();  }
  void   resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset(); }
  double timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); }

  int  yields()          { return _numYields; }
  void resetYields()     { _numYields = 0;    }
  void incrementYields() { _numYields++;      }
  void   resetNumDirtyCards()             { _numDirtyCards = 0;    }
  void   incrementNumDirtyCards(size_t num) { _numDirtyCards += num; }
  size_t numDirtyCards()                  { return _numDirtyCards; }

  static bool foregroundGCShouldWait()           { return _foregroundGCShouldWait; }
  static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v;    }
  static bool foregroundGCIsActive()             { return _foregroundGCIsActive;   }
  static void set_foregroundGCIsActive(bool v)   { _foregroundGCIsActive = v;      }
  size_t sweep_count() const     { return _sweep_count; }
  void   increment_sweep_count() { _sweep_count++; }

  // Timers/stats for gc scheduling and incremental mode pacing.
  CMSStats& stats() { return _stats; }

  // Adaptive size policy
  AdaptiveSizePolicy* size_policy();

  static void print_on_error(outputStream* st);

  // Debugging
  void verify();
  bool verify_after_remark(bool silent = VerifySilently);
  void verify_ok_to_terminate() const PRODUCT_RETURN;
  void verify_work_stacks_empty() const PRODUCT_RETURN;
  void verify_overflow_empty() const PRODUCT_RETURN;

  // Convenience methods in support of debugging
  static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
  HeapWord* block_start(const void* p) const PRODUCT_RETURN0;

  // Accessors
  CMSMarkStack* verification_mark_stack() { return &_markStack; }
  CMSBitMap*    verification_mark_bm()    { return &_verification_mark_bm; }

  // Initialization errors
  bool completed_initialization() { return _completed_initialization; }

  void print_eden_and_survivor_chunk_arrays();
};

class CMSExpansionCause : public AllStatic {
 public:
  enum Cause {
    _no_expansion,
    _satisfy_free_ratio,
    _satisfy_promotion,
    _satisfy_allocation,
    _allocate_par_lab,
    _allocate_par_spooling_space,
    _adaptive_size_policy
  };
  // Return a string describing the cause of the expansion.
  static const char* to_string(CMSExpansionCause::Cause cause);
};

class ConcurrentMarkSweepGeneration: public CardGeneration {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweep;
  friend class CMSCollector;
 protected:
  static CMSCollector*      _collector; // the collector that collects us
  CompactibleFreeListSpace* _cmsSpace;  // underlying space (only one for now)

  // Performance Counters
  GenerationCounters* _gen_counters;
  GSpaceCounters*     _space_counters;

  // Words directly allocated, used by CMSStats.
  size_t _direct_allocated_words;

  // Non-product stat counters
  NOT_PRODUCT(
    size_t _numObjectsPromoted;
    size_t _numWordsPromoted;
    size_t _numObjectsAllocated;
    size_t _numWordsAllocated;
  )

  // Used for sizing decisions
  bool _incremental_collection_failed;
  bool incremental_collection_failed() {
    return _incremental_collection_failed;
  }
  void set_incremental_collection_failed() {
    _incremental_collection_failed = true;
  }
  void clear_incremental_collection_failed() {
    _incremental_collection_failed = false;
  }

  // accessors
  void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
  CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; }

 private:
  // For parallel young-gen GC support.
  CMSParGCThreadState** _par_gc_thread_states;

  // Reason generation was expanded
  CMSExpansionCause::Cause _expansion_cause;

  // In support of MinChunkSize being larger than min object size
  const double _dilatation_factor;

  enum CollectionTypes {
    Concurrent_collection_type     = 0,
    MS_foreground_collection_type  = 1,
    MSC_foreground_collection_type = 2,
    Unknown_collection_type        = 3
  };

  CollectionTypes _debug_collection_type;

  // True if a compacting collection was done.
  bool _did_compact;
  bool did_compact() { return _did_compact; }

  // Fraction of current occupancy at which to start a CMS collection which
  // will collect this generation (at least).
  double _initiating_occupancy;

 protected:
  // Shrink generation by specified size (returns false if unable to shrink)
  void shrink_free_list_by(size_t bytes);

  // Update statistics for GC
  virtual void update_gc_stats(int level, bool full);

  // Maximum available space in the generation (including uncommitted)
  // space.
  size_t max_available() const;

  // getter and initializer for _initiating_occupancy field.
  double initiating_occupancy() const { return _initiating_occupancy; }
  void   init_initiating_occupancy(intx io, uintx tr);

 public:
  ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                int level, CardTableRS* ct,
                                bool use_adaptive_freelists,
                                FreeBlockDictionary<FreeChunk>::DictionaryChoice);

  // Accessors
  CMSCollector* collector() const { return _collector; }
  static void set_collector(CMSCollector* collector) {
    assert(_collector == NULL, "already set");
    _collector = collector;
  }
  CompactibleFreeListSpace* cmsSpace() const { return _cmsSpace; }

  Mutex* freelistLock() const;

  virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }

  void set_did_compact(bool v) { _did_compact = v; }

  bool refs_discovery_is_atomic() const { return false; }
  bool refs_discovery_is_mt()     const {
    // Note: CMS does MT-discovery during the parallel-remark
    // phases. Use ReferenceProcessorMTMutator to make refs
    // discovery MT-safe during such phases or other parallel
    // discovery phases in the future. This may all go away
    // if/when we decide that refs discovery is sufficiently
    // rare that the cost of the CAS's involved is in the
    // noise. That's a measurement that should be done, and
    // the code simplified if that turns out to be the case.
    return ConcGCThreads > 1;
  }

  // Override
  virtual void ref_processor_init();

  // Grow generation by specified size (returns false if unable to grow)
  bool grow_by(size_t bytes);
  // Grow generation to reserved size.
  bool grow_to_reserved();

  void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;
  double occupancy() const { return ((double)used())/((double)capacity()); }
  size_t contiguous_available() const;
  size_t unsafe_max_alloc_nogc() const;

  // overrides
  MemRegion used_region() const;
  MemRegion used_region_at_save_marks() const;

  // Does a "full" (forced) collection invoked on this generation collect
  // all younger generations as well? Note that the second conjunct is a
  // hack to allow the collection of the younger gen first if the flag is
  // set.
  virtual bool full_collects_younger_generations() const {
    return UseCMSCompactAtFullCollection && !ScavengeBeforeFullGC;
  }

  void space_iterate(SpaceClosure* blk, bool usedOnly = false);

  // Support for compaction
  CompactibleSpace* first_compaction_space() const;
  // Adjust quantities in the generation affected by
  // the compaction.
  void reset_after_compaction();

  // Allocation support
  HeapWord* allocate(size_t size, bool tlab);
  HeapWord* have_lock_and_allocate(size_t size, bool tlab);
  oop       promote(oop obj, size_t obj_size);
  HeapWord* par_allocate(size_t size, bool tlab) {
    return allocate(size, tlab);
  }


  // Used by CMSStats to track direct allocation. The value is sampled and
  // reset after each young gen collection.
  size_t direct_allocated_words() const { return _direct_allocated_words; }
  void reset_direct_allocated_words()   { _direct_allocated_words = 0; }

  // Overrides for parallel promotion.
  virtual oop par_promote(int thread_num,
                          oop obj, markOop m, size_t word_sz);
  // This one should not be called for CMS.
  virtual void par_promote_alloc_undo(int thread_num,
                                      HeapWord* obj, size_t word_sz);
  virtual void par_promote_alloc_done(int thread_num);
  virtual void par_oop_since_save_marks_iterate_done(int thread_num);

  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const;

  // Inform this (non-young) generation that a promotion failure was
  // encountered during a collection of a younger generation that
  // promotes into this generation.
  virtual void promotion_failure_occurred();

  bool should_collect(bool full, size_t size, bool tlab);
  virtual bool should_concurrent_collect() const;
  virtual bool is_too_full() const;
  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);

  HeapWord* expand_and_allocate(size_t word_size,
                                bool tlab,
                                bool parallel = false);

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_prologue_work(bool full, bool registerClosure,
                        ModUnionClosure* modUnionClosure);
  void gc_epilogue(bool full);
  void gc_epilogue_work(bool full);

  // Time since last GC of this generation
  jlong time_of_last_gc(jlong now) {
    return collector()->time_of_last_gc(now);
  }
  void update_time_of_last_gc(jlong now) {
    collector()->update_time_of_last_gc(now);
  }

  // Allocation failure
  void expand(size_t bytes, size_t expand_bytes,
              CMSExpansionCause::Cause cause);
  virtual bool expand(size_t bytes, size_t expand_bytes);
  void shrink(size_t bytes);
  void shrink_by(size_t bytes);
  HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
  bool expand_and_ensure_spooling_space(PromotionInfo* promo);

  // Iteration support and related enquiries
  void save_marks();
  bool no_allocs_since_save_marks();
  void younger_refs_iterate(OopsInGenClosure* cl);

  // Iteration support specific to CMS generations
  void save_sweep_limit();

  // More iteration support
  virtual void oop_iterate(ExtendedOopClosure* cl);
  virtual void safe_object_iterate(ObjectClosure* cl);
  virtual void object_iterate(ObjectClosure* cl);

  // Need to declare the full complement of closures, whether we'll
  // override them or not, or get message from the compiler:
  //   oop_since_save_marks_iterate_nv hides virtual function...
#define CMS_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DECL)

  // Smart allocation  XXX -- move to CFLSpace?
  void setNearLargestChunk();
  bool isNearLargestChunk(HeapWord* addr);

  // Get the chunk at the end of the space. Delegates to
  // the space.
  FreeChunk* find_chunk_at_end();

  void post_compact();

  // Debugging
  void prepare_for_verify();
  void verify();
  void print_statistics() PRODUCT_RETURN;

  // Performance Counters support
  virtual void update_counters();
  virtual void update_counters(size_t used);
  void initialize_performance_counters();
  CollectorCounters* counters() { return collector()->counters(); }

  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num) {
    // Delegate to collector
    return collector()->get_data_recorder(thr_num);
  }
  void sample_eden_chunk() {
    // Delegate to collector
    return collector()->sample_eden_chunk();
  }

  // Printing
  const char* name() const;
  virtual const char* short_name() const { return "CMS"; }
  void print() const;
  void printOccupancy(const char* s);
  bool must_be_youngest() const { return false; }
  bool must_be_oldest()   const { return true; }

  // Resize the generation after a compacting GC. The
  // generation can be treated as a contiguous space
  // after the compaction.
  virtual void compute_new_size();
  // Resize the generation after a non-compacting
  // collection.
  void compute_new_size_free_list();

  CollectionTypes debug_collection_type() { return _debug_collection_type; }
  void rotate_debug_collection_type();
};

//
// Closures of various sorts used by CMS to accomplish its work
//

// This closure is used to do concurrent marking from the roots
// following the first checkpoint.
class MarkFromRootsClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _bitMap;
  CMSBitMap*     _mut;
  CMSMarkStack*  _markStack;
  bool           _yield;
  int            _skipBits;
  HeapWord*      _finger;
  HeapWord*      _threshold;
  DEBUG_ONLY(bool _verifying;)

 public:
  MarkFromRootsClosure(CMSCollector* collector, MemRegion span,
                       CMSBitMap* bitMap,
                       CMSMarkStack* markStack,
                       bool should_yield, bool verifying = false);
  bool do_bit(size_t offset);
  void reset(HeapWord* addr);
  inline void do_yield_check();

 private:
  void scanOopsInOop(HeapWord* ptr);
  void do_yield_work();
};

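// Illustrative sketch (assumed flow, not code from this file): the collector
// drives this closure over the marking bit map; do_bit() is invoked for each
// set bit, scans the object found there, and pushes work that falls behind
// the finger onto the mark stack:
//
//   MarkFromRootsClosure cl(collector, span, &_markBitMap, &_markStack,
//                           true /* should_yield */);
//   _markBitMap.iterate(&cl, span.start(), span.end());
//   // cl.do_bit(offset) converts the offset back to an address,
//   // scanOopsInOop()s the object, and calls do_yield_check() so that
//   // concurrent marking can yield to the mutators.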
// This closure is used to do concurrent multi-threaded
// marking from the roots following the first checkpoint.
// XXX This should really be a subclass of the serial version
// above, but I have not had the time to refactor things cleanly.
class Par_MarkFromRootsClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _whole_span;
  MemRegion      _span;
  CMSBitMap*     _bit_map;
  CMSBitMap*     _mut;
  OopTaskQueue*  _work_queue;
  CMSMarkStack*  _overflow_stack;
  bool           _yield;
  int            _skip_bits;
  HeapWord*      _finger;
  HeapWord*      _threshold;
  CMSConcMarkingTask* _task;
 public:
  Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
                           MemRegion span,
                           CMSBitMap* bit_map,
                           OopTaskQueue* work_queue,
                           CMSMarkStack* overflow_stack,
                           bool should_yield);
  bool do_bit(size_t offset);
  inline void do_yield_check();

 private:
  void scan_oops_in_oop(HeapWord* ptr);
  void do_yield_work();
  bool get_work_from_overflow_stack();
};

// The following closures are used to do certain kinds of verification of
// CMS marking.
class PushAndMarkVerifyClosure: public MetadataAwareOopClosure {
  CMSCollector* _collector;
  MemRegion     _span;
  CMSBitMap*    _verification_bm;
  CMSBitMap*    _cms_bm;
  CMSMarkStack* _mark_stack;
 protected:
  void do_oop(oop p);
  template <class T> inline void do_oop_work(T *p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    do_oop(obj);
  }
 public:
  PushAndMarkVerifyClosure(CMSCollector* cms_collector,
                           MemRegion span,
                           CMSBitMap* verification_bm,
                           CMSBitMap* cms_bm,
                           CMSMarkStack* mark_stack);
  void do_oop(oop* p);
  void do_oop(narrowOop* p);

  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
};

class MarkFromRootsVerifyClosure: public BitMapClosure {
  CMSCollector* _collector;
  MemRegion     _span;
  CMSBitMap*    _verification_bm;
  CMSBitMap*    _cms_bm;
  CMSMarkStack* _mark_stack;
  HeapWord*     _finger;
  PushAndMarkVerifyClosure _pam_verify_closure;
 public:
  MarkFromRootsVerifyClosure(CMSCollector* collector, MemRegion span,
                             CMSBitMap* verification_bm,
                             CMSBitMap* cms_bm,
                             CMSMarkStack* mark_stack);
  bool do_bit(size_t offset);
  void reset(HeapWord* addr);
};


// This closure is used to check that a certain set of bits is
// "empty" (i.e. the bit vector doesn't have any 1-bits).
class FalseBitMapClosure: public BitMapClosure {
 public:
  bool do_bit(size_t offset) {
    guarantee(false, "Should not have a 1 bit");
    return true;
  }
};

// A version of ObjectClosure with "memory" (see _previous_address below)
class UpwardsObjectClosure: public BoolObjectClosure {
  HeapWord* _previous_address;
 public:
  UpwardsObjectClosure() : _previous_address(NULL) { }
  void set_previous(HeapWord* addr) { _previous_address = addr; }
  HeapWord* previous()              { return _previous_address; }
  // A return value of "true" can be used by the caller to decide
  // if this object's end should *NOT* be recorded in
  // _previous_address above.
  virtual bool do_object_bm(oop obj, MemRegion mr) = 0;
};

// This closure is used during the second checkpointing phase
// to rescan the marked objects on the dirty cards in the mod
// union table and the card table proper. It's invoked via
// MarkFromDirtyCardsClosure below. It uses either
// [Par_]MarkRefsIntoAndScanClosure (Par_ in the parallel case)
// declared in genOopClosures.hpp to accomplish some of its work.
// In the parallel case the bitMap is shared, so access to
// it needs to be suitably synchronized for updates by embedded
// closures that update it; however, this closure itself only
// reads the bit_map and because it is idempotent, is immune to
// reading stale values.
class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure {
  #ifdef ASSERT
    CMSCollector* _collector;
    MemRegion     _span;
    union {
      CMSMarkStack* _mark_stack;
      OopTaskQueue* _work_queue;
    };
  #endif // ASSERT
  bool       _parallel;
  CMSBitMap* _bit_map;
  union {
    MarkRefsIntoAndScanClosure*     _scan_closure;
    Par_MarkRefsIntoAndScanClosure* _par_scan_closure;
  };

 public:
  ScanMarkedObjectsAgainClosure(CMSCollector* collector,
                                MemRegion span,
                                ReferenceProcessor* rp,
                                CMSBitMap* bit_map,
                                CMSMarkStack* mark_stack,
                                MarkRefsIntoAndScanClosure* cl):
    #ifdef ASSERT
      _collector(collector),
      _span(span),
      _mark_stack(mark_stack),
    #endif // ASSERT
    _parallel(false),
    _bit_map(bit_map),
    _scan_closure(cl) { }

  ScanMarkedObjectsAgainClosure(CMSCollector* collector,
                                MemRegion span,
                                ReferenceProcessor* rp,
                                CMSBitMap* bit_map,
                                OopTaskQueue* work_queue,
                                Par_MarkRefsIntoAndScanClosure* cl):
    #ifdef ASSERT
      _collector(collector),
      _span(span),
      _work_queue(work_queue),
    #endif // ASSERT
    _parallel(true),
    _bit_map(bit_map),
    _par_scan_closure(cl) { }

  bool do_object_b(oop obj) {
    guarantee(false, "Call do_object_b(oop, MemRegion) form instead");
    return false;
  }
  bool do_object_bm(oop p, MemRegion mr);
};

// This closure is used during the second checkpointing phase
// to rescan the marked objects on the dirty cards in the mod
// union table and the card table proper. It invokes
// ScanMarkedObjectsAgainClosure above to accomplish much of its work.
// In the parallel case, the bit map is shared and requires
// synchronized access.
class MarkFromDirtyCardsClosure: public MemRegionClosure {
  CompactibleFreeListSpace*     _space;
  ScanMarkedObjectsAgainClosure _scan_cl;
  size_t                        _num_dirty_cards;

 public:
  MarkFromDirtyCardsClosure(CMSCollector* collector,
                            MemRegion span,
                            CompactibleFreeListSpace* space,
                            CMSBitMap* bit_map,
                            CMSMarkStack* mark_stack,
                            MarkRefsIntoAndScanClosure* cl):
    _space(space),
    _num_dirty_cards(0),
    _scan_cl(collector, span, collector->ref_processor(), bit_map,
             mark_stack, cl) { }

  MarkFromDirtyCardsClosure(CMSCollector* collector,
                            MemRegion span,
                            CompactibleFreeListSpace* space,
                            CMSBitMap* bit_map,
                            OopTaskQueue* work_queue,
                            Par_MarkRefsIntoAndScanClosure* cl):
    _space(space),
    _num_dirty_cards(0),
    _scan_cl(collector, span, collector->ref_processor(), bit_map,
             work_queue, cl) { }

  void do_MemRegion(MemRegion mr);
  void set_space(CompactibleFreeListSpace* space) { _space = space; }
  size_t num_dirty_cards() { return _num_dirty_cards; }
};

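// Illustrative sketch (assumed wiring, not code from this file): during
// remark, dirty ranges recorded in the mod union table are fed through the
// closures above, with each dirty region rescanned object by object:
//
//   MarkRefsIntoAndScanClosure scan_cl(/* span, bit map, ... */);
//   MarkFromDirtyCardsClosure dirty_cl(collector, span, cms_space,
//                                      bit_map, mark_stack, &scan_cl);
//   _modUnionTable.dirty_range_iterate_clear(&dirty_cl);
//   // dirty_cl.do_MemRegion(mr) applies ScanMarkedObjectsAgainClosure
//   // to the marked objects on each dirty card in mr.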
// This closure is used in the non-product build to check
// that there are no MemRegions with a certain property.
class FalseMemRegionClosure: public MemRegionClosure {
  void do_MemRegion(MemRegion mr) {
    guarantee(!mr.is_empty(), "Shouldn't be empty");
    guarantee(false, "Should never be here");
  }
};

// This closure is used during the precleaning phase
// to "carefully" rescan marked objects on dirty cards.
// It uses MarkRefsIntoAndScanClosure declared in genOopClosures.hpp
// to accomplish some of its work.
class ScanMarkedObjectsAgainCarefullyClosure: public ObjectClosureCareful {
  CMSCollector*               _collector;
  MemRegion                   _span;
  bool                        _yield;
  Mutex*                      _freelistLock;
  CMSBitMap*                  _bitMap;
  CMSMarkStack*               _markStack;
  MarkRefsIntoAndScanClosure* _scanningClosure;

 public:
  ScanMarkedObjectsAgainCarefullyClosure(CMSCollector* collector,
                                         MemRegion     span,
                                         CMSBitMap*    bitMap,
                                         CMSMarkStack* markStack,
                                         MarkRefsIntoAndScanClosure* cl,
                                         bool should_yield):
    _collector(collector),
    _span(span),
    _yield(should_yield),
    _bitMap(bitMap),
    _markStack(markStack),
    _scanningClosure(cl) {
  }

  void do_object(oop p) {
    guarantee(false, "call do_object_careful instead");
  }

  size_t do_object_careful(oop p) {
    guarantee(false, "Unexpected caller");
    return 0;
  }

  size_t do_object_careful_m(oop p, MemRegion mr);

  void setFreelistLock(Mutex* m) {
    _freelistLock = m;
    _scanningClosure->set_freelistLock(m);
  }

 private:
  inline bool do_yield_check();

  void do_yield_work();
};

class SurvivorSpacePrecleanClosure: public ObjectClosureCareful {
  CMSCollector*       _collector;
  MemRegion           _span;
  bool                _yield;
  CMSBitMap*          _bit_map;
  CMSMarkStack*       _mark_stack;
  PushAndMarkClosure* _scanning_closure;
  unsigned int        _before_count;

 public:
  SurvivorSpacePrecleanClosure(CMSCollector* collector,
                               MemRegion     span,
                               CMSBitMap*    bit_map,
                               CMSMarkStack* mark_stack,
                               PushAndMarkClosure* cl,
                               unsigned int  before_count,
                               bool should_yield):
    _collector(collector),
    _span(span),
    _yield(should_yield),
    _bit_map(bit_map),
    _mark_stack(mark_stack),
    _scanning_closure(cl),
    _before_count(before_count)
  { }

  void do_object(oop p) {
    guarantee(false, "call do_object_careful instead");
  }

  size_t do_object_careful(oop p);

  size_t do_object_careful_m(oop p, MemRegion mr) {
    guarantee(false, "Unexpected caller");
    return 0;
  }

 private:
  inline void do_yield_check();
  void do_yield_work();
};

// This closure is used to accomplish the sweeping work
// after the second checkpoint but before the concurrent reset
// phase.
//
// Terminology
//   left hand chunk (LHC) - block of one or more chunks currently being
//     coalesced. The LHC is available for coalescing with a new chunk.
//   right hand chunk (RHC) - block that is currently being swept that is
//     free or garbage that can be coalesced with the LHC.
// _inFreeRange is true if there is currently a LHC
// _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk.
// _freeRangeInFreeLists is true if the LHC is in the free lists.
// _freeFinger is the address of the current LHC.
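// Illustrative walk-through (an assumed scenario, not code): suppose the
// sweep encounters, in address order, a free chunk F1, a garbage chunk G,
// and a live object L.
//   at F1: _inFreeRange = true; _freeFinger = F1; the LHC starts at F1
//   at G:  G coalesces into the LHC; _lastFreeRangeCoalesced = true
//   at L:  the LHC [F1, L) is flushed to the free lists
//          (flush_cur_free_chunk) and _inFreeRange is reset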
// This closure is used to accomplish the sweeping work
// after the second checkpoint but before the concurrent reset
// phase.
//
// Terminology
//   left hand chunk (LHC)  - block of one or more chunks currently being
//     coalesced. The LHC is available for coalescing with a new chunk.
//   right hand chunk (RHC) - block currently being swept that is free or
//     garbage and can therefore be coalesced with the LHC.
// _inFreeRange is true if there is currently a LHC.
// _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk.
// _freeRangeInFreeLists is true if the LHC is in the free lists.
// _freeFinger is the address of the current LHC.
class SweepClosure: public BlkClosureCareful {
  CMSCollector*                  _collector; // collector doing the work
  ConcurrentMarkSweepGeneration* _g;         // Generation being swept
  CompactibleFreeListSpace*      _sp;        // Space being swept
  HeapWord*                      _limit;     // the address at or above which the
                                             // sweep should stop because we do not
                                             // expect newly-garbage blocks eligible
                                             // for sweeping past that address
  Mutex*                         _freelistLock; // Free list lock (in space)
  CMSBitMap*                     _bitMap;    // Marking bit map (in generation)
  bool                           _inFreeRange; // Indicates if we are in the
                                             // midst of a free run
  bool                           _freeRangeInFreeLists;
                                             // Often, we have just found a free
                                             // chunk and started a new free range;
                                             // we do not eagerly remove this chunk
                                             // from the free lists unless there is
                                             // a possibility of coalescing. When
                                             // true, this flag indicates that the
                                             // _freeFinger below points to a
                                             // potentially free chunk that may
                                             // still be in the free lists.
  bool                           _lastFreeRangeCoalesced;
                                             // free range contains chunks coalesced
  bool                           _yield;     // Whether sweeping should be done with
                                             // yields. For instance, when done by
                                             // the foreground collector we
                                             // shouldn't yield.
  HeapWord*                      _freeFinger; // When _inFreeRange is set, the
                                             // pointer to the "left hand chunk"
  size_t                         _freeRangeSize;
                                             // When _inFreeRange is set, this
                                             // indicates the accumulated size
                                             // of the "left hand chunk"
  NOT_PRODUCT(
    size_t                       _numObjectsFreed;
    size_t                       _numWordsFreed;
    size_t                       _numObjectsLive;
    size_t                       _numWordsLive;
    size_t                       _numObjectsAlreadyFree;
    size_t                       _numWordsAlreadyFree;
    FreeChunk*                   _last_fc;
  )
 private:
  // Code that is common to a free chunk or garbage when
  // encountered during sweeping.
  void do_post_free_or_garbage_chunk(FreeChunk* fc, size_t chunkSize);
  // Process a free chunk during sweeping.
  void do_already_free_chunk(FreeChunk* fc);
  // Work method called when processing an already free or a
  // freshly garbage chunk to do a lookahead and possibly a
  // preemptive flush if crossing over _limit.
  void lookahead_and_flush(FreeChunk* fc, size_t chunkSize);
  // Process a garbage chunk during sweeping.
  size_t do_garbage_chunk(FreeChunk* fc);
  // Process a live chunk during sweeping.
  size_t do_live_chunk(FreeChunk* fc);

  // Accessors.
  HeapWord* freeFinger() const            { return _freeFinger; }
  void set_freeFinger(HeapWord* v)        { _freeFinger = v; }
  bool inFreeRange() const                { return _inFreeRange; }
  void set_inFreeRange(bool v)            { _inFreeRange = v; }
  bool lastFreeRangeCoalesced() const     { return _lastFreeRangeCoalesced; }
  void set_lastFreeRangeCoalesced(bool v) { _lastFreeRangeCoalesced = v; }
  bool freeRangeInFreeLists() const       { return _freeRangeInFreeLists; }
  void set_freeRangeInFreeLists(bool v)   { _freeRangeInFreeLists = v; }

  // Initialize a free range.
  void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists);
  // Return this chunk to the free lists.
  void flush_cur_free_chunk(HeapWord* chunk, size_t size);

  // Check if we should yield and do so when necessary.
  inline void do_yield_check(HeapWord* addr);

  // Yield
  void do_yield_work(HeapWord* addr);

  // Debugging/Printing
  void print_free_block_coalesced(FreeChunk* fc) const;

 public:
  SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
               CMSBitMap* bitMap, bool should_yield);
  ~SweepClosure() PRODUCT_RETURN;

  size_t do_blk_careful(HeapWord* addr);
  void print() const { print_on(tty); }
  void print_on(outputStream* st) const;
};
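
// A hedged sketch of the sweep driver (the real driver lives in the
// collector implementation; "collector", "cms_gen", "bit_map" and
// "cms_space" are assumed names). Each block in the space is visited once;
// free and garbage blocks are coalesced into the current "left hand chunk",
// which is flushed back to the free lists when a live object ends the run.
//
//   SweepClosure sweep_cl(collector, cms_gen, bit_map,
//                         true /* should_yield */);
//   cms_space->blk_iterate_careful(&sweep_cl);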
// Closures related to weak references processing

// During CMS' weak reference processing, this is a
// work-routine/closure used to complete transitive
// marking of objects as live after a certain point
// in which an initial set has been completely accumulated.
// This closure is currently used both during the final
// remark stop-world phase, as well as during the concurrent
// precleaning of the discovered reference lists.
class CMSDrainMarkingStackClosure: public VoidClosure {
  CMSCollector*        _collector;
  MemRegion            _span;
  CMSMarkStack*        _mark_stack;
  CMSBitMap*           _bit_map;
  CMSKeepAliveClosure* _keep_alive;
  bool                 _concurrent_precleaning;
 public:
  CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span,
                              CMSBitMap* bit_map, CMSMarkStack* mark_stack,
                              CMSKeepAliveClosure* keep_alive,
                              bool cpc):
    _collector(collector),
    _span(span),
    _bit_map(bit_map),
    _mark_stack(mark_stack),
    _keep_alive(keep_alive),
    _concurrent_precleaning(cpc) {
    assert(_concurrent_precleaning == _keep_alive->concurrent_precleaning(),
           "Mismatch");
  }

  void do_void();
};

// A parallel version of CMSDrainMarkingStackClosure above.
class CMSParDrainMarkingStackClosure: public VoidClosure {
  CMSCollector*                 _collector;
  MemRegion                     _span;
  OopTaskQueue*                 _work_queue;
  CMSBitMap*                    _bit_map;
  CMSInnerParMarkAndPushClosure _mark_and_push;

 public:
  CMSParDrainMarkingStackClosure(CMSCollector* collector,
                                 MemRegion span, CMSBitMap* bit_map,
                                 OopTaskQueue* work_queue):
    _collector(collector),
    _span(span),
    _bit_map(bit_map),
    _work_queue(work_queue),
    _mark_and_push(collector, span, bit_map, work_queue) { }

  void trim_queue(uint max);
  void do_void();
};

// Allow yielding or short-circuiting of reference list
// precleaning work.
class CMSPrecleanRefsYieldClosure: public YieldClosure {
  CMSCollector* _collector;
  void do_yield_work();
 public:
  CMSPrecleanRefsYieldClosure(CMSCollector* collector):
    _collector(collector) {}
  virtual bool should_return();
};


// Convenience class that locks the free list locks of a given CMS
// collector for the duration of a scope (RAII).
class FreelistLocker: public StackObj {
 private:
  CMSCollector* _collector;
 public:
  FreelistLocker(CMSCollector* collector):
    _collector(collector) {
    _collector->getFreelistLocks();
  }

  ~FreelistLocker() {
    _collector->releaseFreelistLocks();
  }
};
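
// A minimal RAII sketch for FreelistLocker (illustrative; "collector" is an
// assumed CMSCollector* in scope):
//
//   {
//     FreelistLocker fll(collector);  // getFreelistLocks() on entry
//     // ... operate on the free lists of the CMS space(s) safely ...
//   }                                 // releaseFreelistLocks() on scope exit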
// Mark all dead objects in a given space.
class MarkDeadObjectsClosure: public BlkClosure {
  const CMSCollector*             _collector;
  const CompactibleFreeListSpace* _sp;
  CMSBitMap*                      _live_bit_map;
  CMSBitMap*                      _dead_bit_map;
 public:
  MarkDeadObjectsClosure(const CMSCollector* collector,
                         const CompactibleFreeListSpace* sp,
                         CMSBitMap* live_bit_map,
                         CMSBitMap* dead_bit_map) :
    _collector(collector),
    _sp(sp),
    _live_bit_map(live_bit_map),
    _dead_bit_map(dead_bit_map) {}
  size_t do_blk(HeapWord* addr);
};

class TraceCMSMemoryManagerStats : public TraceMemoryManagerStats {
 public:
  TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause);
};


#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
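
// A hedged sketch of MarkDeadObjectsClosure in use (illustrative only;
// "collector", "cms_space", "live_map" and "dead_map" are assumed names):
// walking the blocks of the space sets a bit in "dead_map" for each object
// that is not marked in "live_map".
//
//   MarkDeadObjectsClosure mdo_cl(collector, cms_space, live_map, dead_map);
//   cms_space->blk_iterate(&mdo_cl);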