/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP

#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gSpaceCounters.hpp"
#include "gc_implementation/shared/gcStats.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "gc_implementation/shared/generationCounters.hpp"
#include "memory/freeBlockDictionary.hpp"
#include "memory/generation.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memoryService.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/taskqueue.hpp"
#include "utilities/yieldingWorkgroup.hpp"

// ConcurrentMarkSweepGeneration is in support of a concurrent
// mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Schenker
// style. We assume, for now, that this generation is always the
// seniormost generation (modulo the PermGeneration), and for simplicity
// in the first implementation, that this generation is a single compactible
// space. Neither of these restrictions appears essential, and will be
// relaxed in the future when more time is available to implement the
// greater generality (and there's a need for it).
//
// Concurrent mode failures are currently handled by
// means of a sliding mark-compact.

class CMSAdaptiveSizePolicy;
class CMSConcMarkingTask;
class CMSGCAdaptivePolicyCounters;
class CMSTracer;
class ConcurrentGCTimer;
class ConcurrentMarkSweepGeneration;
class ConcurrentMarkSweepPolicy;
class ConcurrentMarkSweepThread;
class CompactibleFreeListSpace;
class FreeChunk;
class PromotionInfo;
class ScanMarkedObjectsAgainCarefullyClosure;
class SerialOldTracer;

// A generic CMS bit map. It's the basis for both the CMS marking bit map
// as well as for the mod union table (in each case only a subset of the
// methods are used). This is essentially a wrapper around the BitMap class,
// with one bit per (1<<_shifter) HeapWords. (i.e. for the marking bit map,
// we have _shifter == 0. and for the mod union table we have
// shifter == CardTableModRefBS::card_shift - LogHeapWordSize.)
// XXX 64-bit issues in BitMap?
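//
// A hedged worked example (illustrative only, based on the comment above
// and on heapWordToOffset() below): with _shifter == 0, each bit covers a
// single HeapWord, so the bit index for an address is
//   heapWordToOffset(addr) == (addr - _bmStartWord) >> _shifter
// whereas for the mod union table one bit covers a card's worth of
// HeapWords and the same expression divides by the card size in words.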
class CMSBitMap VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;

  HeapWord*    _bmStartWord;   // base address of range covered by map
  size_t       _bmWordSize;    // map size (in #HeapWords covered)
  const int    _shifter;       // shifts to convert HeapWord to bit position
  VirtualSpace _virtual_space; // underlying the bit map
  BitMap       _bm;            // the bit map itself
 public:
  Mutex* const _lock;          // mutex protecting _bm;

 public:
  // constructor
  CMSBitMap(int shifter, int mutex_rank, const char* mutex_name);

  // allocates the actual storage for the map
  bool allocate(MemRegion mr);
  // field getter
  Mutex* lock() const { return _lock; }
  // locking verifier convenience function
  void assert_locked() const PRODUCT_RETURN;

  // inquiries
  HeapWord* startWord()   const { return _bmStartWord; }
  size_t    sizeInWords() const { return _bmWordSize;  }
  size_t    sizeInBits()  const { return _bm.size(); }
  // the following is one past the last word in space
  HeapWord* endWord()     const { return _bmStartWord + _bmWordSize; }

  // reading marks
  bool isMarked(HeapWord* addr) const;
  bool par_isMarked(HeapWord* addr) const; // do not lock checks
  bool isUnmarked(HeapWord* addr) const;
  bool isAllClear() const;

  // writing marks
  void mark(HeapWord* addr);
  // For marking by parallel GC threads;
  // returns true if we did, false if another thread did
  bool par_mark(HeapWord* addr);

  void mark_range(MemRegion mr);
  void par_mark_range(MemRegion mr);
  void mark_large_range(MemRegion mr);
  void par_mark_large_range(MemRegion mr);
  void par_clear(HeapWord* addr); // For unmarking by parallel GC threads.
  void clear_range(MemRegion mr);
  void par_clear_range(MemRegion mr);
  void clear_large_range(MemRegion mr);
  void par_clear_large_range(MemRegion mr);
  void clear_all();
  void clear_all_incrementally();  // Not yet implemented!!

  NOT_PRODUCT(
    // checks the memory region for validity
    void region_invariant(MemRegion mr);
  )

  // iteration
  void iterate(BitMapClosure* cl) {
    _bm.iterate(cl);
  }
  void iterate(BitMapClosure* cl, HeapWord* left, HeapWord* right);
  void dirty_range_iterate_clear(MemRegionClosure* cl);
  void dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl);

  // auxiliary support for iteration
  HeapWord* getNextMarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextMarkedWordAddress(HeapWord* start_addr,
                                     HeapWord* end_addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* start_addr,
                                       HeapWord* end_addr) const;
  MemRegion getAndClearMarkedRegion(HeapWord* addr);
  MemRegion getAndClearMarkedRegion(HeapWord* start_addr,
                                    HeapWord* end_addr);

  // conversion utilities
  HeapWord* offsetToHeapWord(size_t offset) const;
  size_t    heapWordToOffset(HeapWord* addr) const;
  size_t    heapWordDiffToOffsetDiff(size_t diff) const;

  // debugging
  // is this address range covered by the bit-map?
  NOT_PRODUCT(
    bool covers(MemRegion mr) const;
    bool covers(HeapWord* start, size_t size = 0) const;
  )
  void verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) PRODUCT_RETURN;
};

// Represents a marking stack used by the CMS collector.
// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
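//
// A minimal usage sketch (illustrative only; the capacity and the overflow
// policy are the caller's choice, not prescribed by this interface):
//
//   CMSMarkStack stk;
//   stk.allocate(capacity_in_elements);  // reserve backing store
//   if (!stk.push(obj)) {
//     // overflow: record it; expand() may be tried at an opportune time
//   }
//   oop o = stk.pop();                   // NULL when the stack is empty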
class CMSMarkStack: public CHeapObj<mtGC>  {
  //
  friend class CMSCollector;   // to get at expansion stats further below
  //

  VirtualSpace _virtual_space;  // space for the stack
  oop*   _base;      // bottom of stack
  size_t _index;     // one more than last occupied index
  size_t _capacity;  // max #elements
  Mutex  _par_lock;  // an advisory lock used in case of parallel access
  NOT_PRODUCT(size_t _max_depth;)  // max depth plumbed during run

 protected:
  size_t _hit_limit;      // we hit max stack size limit
  size_t _failed_double;  // we failed expansion before hitting limit

 public:
  CMSMarkStack():
    _par_lock(Mutex::event, "CMSMarkStack._par_lock", true),
    _hit_limit(0),
    _failed_double(0) {}

  bool allocate(size_t size);

  size_t capacity() const { return _capacity; }

  oop pop() {
    if (!isEmpty()) {
      return _base[--_index] ;
    }
    return NULL;
  }

  bool push(oop ptr) {
    if (isFull()) {
      return false;
    } else {
      _base[_index++] = ptr;
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
      return true;
    }
  }

  bool isEmpty() const { return _index == 0; }
  bool isFull()  const {
    assert(_index <= _capacity, "buffer overflow");
    return _index == _capacity;
  }

  size_t length() { return _index; }

  // "Parallel versions" of some of the above
  oop par_pop() {
    // lock and pop
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return pop();
  }

  bool par_push(oop ptr) {
    // lock and push
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return push(ptr);
  }

  // Forcibly reset the stack, losing all of its contents.
  void reset() {
    _index = 0;
  }

  // Expand the stack, typically in response to an overflow condition
  void expand();

  // Compute the least valued stack element.
  oop least_value(HeapWord* low) {
    oop least = (oop)low;
    for (size_t i = 0; i < _index; i++) {
      least = MIN2(least, _base[i]);
    }
    return least;
  }

  // Exposed here to allow stack expansion in || case
  Mutex* par_lock() { return &_par_lock; }
};

class CardTableRS;
class CMSParGCThreadState;

class ModUnionClosure: public MemRegionClosure {
 protected:
  CMSBitMap* _t;
 public:
  ModUnionClosure(CMSBitMap* t): _t(t) { }
  void do_MemRegion(MemRegion mr);
};

class ModUnionClosurePar: public ModUnionClosure {
 public:
  ModUnionClosurePar(CMSBitMap* t): ModUnionClosure(t) { }
  void do_MemRegion(MemRegion mr);
};

// Survivor Chunk Array in support of parallelization of
// Survivor Space rescan.
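//
// A hedged usage sketch (illustrative only): the collector records PLAB
// boundary samples and later carves per-thread rescan chunks out of them,
// roughly:
//
//   ChunkArray ca(storage, capacity);
//   ca.record_sample(plab_top, plab_word_size);  // size currently ignored
//   for (size_t i = 0; i < ca.end(); i++) {
//     HeapWord* boundary = ca.nth(i);            // form a rescan chunk
//   }
//   ca.reset();  // clears samples; may warn if overflows occurred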
class ChunkArray: public CHeapObj<mtGC> {
  size_t _index;
  size_t _capacity;
  size_t _overflows;
  HeapWord** _array;   // storage for array

 public:
  ChunkArray() : _index(0), _capacity(0), _overflows(0), _array(NULL) {}
  ChunkArray(HeapWord** a, size_t c):
    _index(0), _capacity(c), _overflows(0), _array(a) {}

  HeapWord** array() { return _array; }
  void set_array(HeapWord** a) { _array = a; }

  size_t capacity() { return _capacity; }
  void set_capacity(size_t c) { _capacity = c; }

  size_t end() {
    assert(_index <= capacity(),
           err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT "): out of bounds",
                   _index, _capacity));
    return _index;
  }  // exclusive

  HeapWord* nth(size_t n) {
    assert(n < end(), "Out of bounds access");
    return _array[n];
  }

  void reset() {
    _index = 0;
    if (_overflows > 0 && PrintCMSStatistics > 1) {
      warning("CMS: ChunkArray[" SIZE_FORMAT "] overflowed " SIZE_FORMAT " times",
              _capacity, _overflows);
    }
    _overflows = 0;
  }

  void record_sample(HeapWord* p, size_t sz) {
    // For now we do not do anything with the size
    if (_index < _capacity) {
      _array[_index++] = p;
    } else {
      ++_overflows;
      assert(_index == _capacity,
             err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT
                     "): out of bounds at overflow#" SIZE_FORMAT,
                     _index, _capacity, _overflows));
    }
  }
};

//
// Timing, allocation and promotion statistics for gc scheduling and incremental
// mode pacing.  Most statistics are exponential averages.
//
class CMSStats VALUE_OBJ_CLASS_SPEC {
 private:
  ConcurrentMarkSweepGeneration* const _cms_gen;   // The cms (old) gen.

  // The following are exponential averages with factor alpha:
  //   avg = (100 - alpha) * avg + alpha * cur_sample
  //
  //   The durations measure:  end_time[n] - start_time[n]
  //   The periods   measure:  start_time[n] - start_time[n-1]
  //
  // The cms period and duration include only concurrent collections; time spent
  // in foreground cms collections due to System.gc() or because of a failure to
  // keep up is not included.
  //
  // There are 3 alphas to "bootstrap" the statistics.  The _saved_alpha is the
  // real value, but is used only after the first period.  A value of 100 is
  // used for the first sample so it gets the entire weight.
  unsigned int _saved_alpha; // 0-100
  unsigned int _gc0_alpha;
  unsigned int _cms_alpha;

  double _gc0_duration;
  double _gc0_period;
  size_t _gc0_promoted;           // bytes promoted per gc0
  double _cms_duration;
  double _cms_duration_pre_sweep; // time from initiation to start of sweep
  double _cms_duration_per_mb;
  double _cms_period;
  size_t _cms_allocated;          // bytes of direct allocation per gc0 period

  // Timers.
  elapsedTimer _cms_timer;
  TimeStamp    _gc0_begin_time;
  TimeStamp    _cms_begin_time;
  TimeStamp    _cms_end_time;

  // Snapshots of the amount used in the CMS generation.
  size_t _cms_used_at_gc0_begin;
  size_t _cms_used_at_gc0_end;
  size_t _cms_used_at_cms_begin;

  // Used to prevent the duty cycle from being reduced in the middle of a cms
  // cycle.
  bool _allow_duty_cycle_reduction;

  enum {
    _GC0_VALID = 0x1,
    _CMS_VALID = 0x2,
    _ALL_VALID = _GC0_VALID | _CMS_VALID
  };

  unsigned int _valid_bits;

  unsigned int _icms_duty_cycle;  // icms duty cycle (0-100).
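  // Worked example for the exponential averages above (illustrative; it
  // assumes the factor is applied as a percentage, as the 0-100 ranges
  // suggest): with alpha == 25 a new sample is blended as
  //   avg = ((100 - 25) * avg + 25 * cur_sample) / 100
  // and the first sample, taken with alpha == 100, receives all the weight.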
 protected:

  // Return a duty cycle that avoids wild oscillations, by limiting the amount
  // of change between old_duty_cycle and new_duty_cycle (the latter is treated
  // as a recommended value).
  static unsigned int icms_damped_duty_cycle(unsigned int old_duty_cycle,
                                             unsigned int new_duty_cycle);
  unsigned int icms_update_duty_cycle_impl();

  // In support of adjusting cms trigger ratios based on history
  // of concurrent mode failure.
  double cms_free_adjustment_factor(size_t free) const;
  void   adjust_cms_free_adjustment_factor(bool fail, size_t free);

 public:
  CMSStats(ConcurrentMarkSweepGeneration* cms_gen,
           unsigned int alpha = CMSExpAvgFactor);

  // Whether or not the statistics contain valid data; higher level statistics
  // cannot be called until this returns true (they require at least one young
  // gen and one cms cycle to have completed).
  bool valid() const;

  // Record statistics.
  void record_gc0_begin();
  void record_gc0_end(size_t cms_gen_bytes_used);
  void record_cms_begin();
  void record_cms_end();

  // Allow management of the cms timer, which must be stopped/started around
  // yield points.
  elapsedTimer& cms_timer()     { return _cms_timer; }
  void start_cms_timer()        { _cms_timer.start(); }
  void stop_cms_timer()         { _cms_timer.stop(); }

  // Basic statistics; units are seconds or bytes.
  double gc0_period() const          { return _gc0_period; }
  double gc0_duration() const        { return _gc0_duration; }
  size_t gc0_promoted() const        { return _gc0_promoted; }
  double cms_period() const          { return _cms_period; }
  double cms_duration() const        { return _cms_duration; }
  double cms_duration_per_mb() const { return _cms_duration_per_mb; }
  size_t cms_allocated() const       { return _cms_allocated; }

  size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end;}

  // Seconds since the last background cms cycle began or ended.
  double cms_time_since_begin() const;
  double cms_time_since_end() const;

  // Higher level statistics--caller must check that valid() returns true before
  // calling.

  // Returns bytes promoted per second of wall clock time.
  double promotion_rate() const;

  // Returns bytes directly allocated per second of wall clock time.
  double cms_allocation_rate() const;

  // Rate at which space in the cms generation is being consumed (sum of the
  // above two).
  double cms_consumption_rate() const;

  // Returns an estimate of the number of seconds until the cms generation will
  // fill up, assuming no collection work is done.
  double time_until_cms_gen_full() const;

  // Returns an estimate of the number of seconds remaining until
  // the cms generation collection should start.
  double time_until_cms_start() const;

  // End of higher level statistics.

  // Returns the cms incremental mode duty cycle, as a percentage (0-100).
  unsigned int icms_duty_cycle() const { return _icms_duty_cycle; }

  // Update the duty cycle and return the new value.
  unsigned int icms_update_duty_cycle();

  // Debugging.
  void print_on(outputStream* st) const PRODUCT_RETURN;
  void print() const { print_on(gclog_or_tty); }
};

// A closure related to weak references processing which
// we embed in the CMSCollector, since we need to pass
// it to the reference processor for secondary filtering
// of references based on reachability of referent;
// see role of _is_alive_non_header closure in the
// ReferenceProcessor class.
// For objects in the CMS generation, this closure checks
// if the object is "live" (reachable). Used in weak
// reference processing.
class CMSIsAliveClosure: public BoolObjectClosure {
  const MemRegion  _span;
  const CMSBitMap* _bit_map;

  friend class CMSCollector;
 public:
  CMSIsAliveClosure(MemRegion span,
                    CMSBitMap* bit_map):
    _span(span),
    _bit_map(bit_map) {
    assert(!span.is_empty(), "Empty span could spell trouble");
  }

  void do_object(oop obj) {
    assert(false, "not to be invoked");
  }

  bool do_object_b(oop obj);
};


// Implements AbstractRefProcTaskExecutor for CMS.
class CMSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
 public:

  CMSRefProcTaskExecutor(CMSCollector& collector)
    : _collector(collector)
  { }

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
 private:
  CMSCollector& _collector;
};


class CMSCollector: public CHeapObj<mtGC> {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweepGeneration;
  friend class CompactibleFreeListSpace;
  friend class CMSParMarkTask;
  friend class CMSParInitialMarkTask;
  friend class CMSParRemarkTask;
  friend class CMSConcMarkingTask;
  friend class CMSRefProcTaskProxy;
  friend class CMSRefProcTaskExecutor;
  friend class ScanMarkedObjectsAgainCarefullyClosure;  // for sampling eden
  friend class SurvivorSpacePrecleanClosure;            // --- ditto -------
  friend class PushOrMarkClosure;             // to access _restart_addr
  friend class Par_PushOrMarkClosure;         // to access _restart_addr
  friend class MarkFromRootsClosure;          //  -- ditto --
                                              // ... and for clearing cards
  friend class Par_MarkFromRootsClosure;      // to access _restart_addr
                                              // ... and for clearing cards
  friend class Par_ConcMarkingClosure;        // to access _restart_addr etc.
  friend class MarkFromRootsVerifyClosure;    // to access _restart_addr
  friend class PushAndMarkVerifyClosure;      //  -- ditto --
  friend class MarkRefsIntoAndScanClosure;    // to access _overflow_list
  friend class PushAndMarkClosure;            //  -- ditto --
  friend class Par_PushAndMarkClosure;        //  -- ditto --
  friend class CMSKeepAliveClosure;           //  -- ditto --
  friend class CMSDrainMarkingStackClosure;   //  -- ditto --
  friend class CMSInnerParMarkAndPushClosure; //  -- ditto --
  NOT_PRODUCT(friend class ScanMarkedObjectsAgainClosure;) // assertion on _overflow_list
  friend class ReleaseForegroundGC;  // to access _foregroundGCShouldWait
  friend class VM_CMS_Operation;
  friend class VM_CMS_Initial_Mark;
  friend class VM_CMS_Final_Remark;
  friend class TraceCMSMemoryManagerStats;

 private:
  jlong _time_of_last_gc;
  void update_time_of_last_gc(jlong now) {
    _time_of_last_gc = now;
  }

  OopTaskQueueSet* _task_queues;

  // Overflow list of grey objects, threaded through mark-word
  // Manipulated with CAS in the parallel/multi-threaded case.
  oop _overflow_list;
  // The following array-pair keeps track of mark words
  // displaced for accommodating overflow list above.
  // This code will likely be revisited under RFE#4922830.
  Stack<oop, mtGC>     _preserved_oop_stack;
  Stack<markOop, mtGC> _preserved_mark_stack;

  int* _hash_seed;

  // In support of multi-threaded concurrent phases
  YieldingFlexibleWorkGang* _conc_workers;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Initialization Errors
  bool _completed_initialization;

  // In support of ExplicitGCInvokesConcurrent
  static bool _full_gc_requested;
  static GCCause::Cause _full_gc_cause;
  unsigned int _collection_count_start;

  // Should we unload classes this concurrent cycle?
  bool _should_unload_classes;
  unsigned int _concurrent_cycles_since_last_unload;
  unsigned int concurrent_cycles_since_last_unload() const {
    return _concurrent_cycles_since_last_unload;
  }
  // Did we (allow) unload classes in the previous concurrent cycle?
  bool unloaded_classes_last_cycle() const {
    return concurrent_cycles_since_last_unload() == 0;
  }
  // Root scanning options for perm gen
  int _roots_scanning_options;
  int roots_scanning_options() const      { return _roots_scanning_options; }
  void add_root_scanning_option(int o)    { _roots_scanning_options |= o;   }
  void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o;  }

  // Verification support
  CMSBitMap _verification_mark_bm;
  void verify_after_remark_work_1();
  void verify_after_remark_work_2();

  // true if any verification flag is on.
  bool _verifying;
  bool verifying() const { return _verifying; }
  void set_verifying(bool v) { _verifying = v; }

  // Collector policy
  ConcurrentMarkSweepPolicy* _collector_policy;
  ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }

  // XXX Move these to CMSStats ??? FIX ME !!!
  elapsedTimer _inter_sweep_timer;   // time between sweeps
  elapsedTimer _intra_sweep_timer;   // time _in_ sweeps
  // padded decaying average estimates of the above
  AdaptivePaddedAverage _inter_sweep_estimate;
  AdaptivePaddedAverage _intra_sweep_estimate;

  CMSTracer* _gc_tracer_cm;
  ConcurrentGCTimer* _gc_timer_cm;

  bool _cms_start_registered;

  GCHeapSummary _last_heap_summary;
  PermGenSummary _last_perm_gen_summary;

  void register_foreground_gc_start(GCCause::Cause cause);
  void register_gc_start(GCCause::Cause cause);
  void register_gc_end();
  void save_heap_summary();
  void report_heap_summary(GCWhen::Type when);

 protected:
  ConcurrentMarkSweepGeneration* _cmsGen;  // old gen (CMS)
  ConcurrentMarkSweepGeneration* _permGen; // perm gen
  MemRegion                      _span;    // span covering above two
  CardTableRS*                   _ct;      // card table

  // CMS marking support structures
  CMSBitMap     _markBitMap;
  CMSBitMap     _modUnionTable;
  CMSMarkStack  _markStack;
  CMSMarkStack  _revisitStack;            // used to keep track of klassKlass objects
                                          // to revisit
  CMSBitMap     _perm_gen_verify_bit_map; // Mark bit map for perm gen verification support.

  HeapWord*     _restart_addr; // in support of marking stack overflow
  void          lower_restart_addr(HeapWord* low);

  // Counters in support of marking stack / work queue overflow handling:
  // a non-zero value indicates certain types of overflow events during
  // the current CMS cycle and could lead to stack resizing efforts at
  // an opportune future time.
  size_t        _ser_pmc_preclean_ovflw;
  size_t        _ser_pmc_remark_ovflw;
  size_t        _par_pmc_remark_ovflw;
  size_t        _ser_kac_preclean_ovflw;
  size_t        _ser_kac_ovflw;
  size_t        _par_kac_ovflw;
  NOT_PRODUCT(ssize_t _num_par_pushes;)

  // ("Weak") Reference processing support
  ReferenceProcessor* _ref_processor;
  CMSIsAliveClosure   _is_alive_closure;
  // keep this textually after _markBitMap and _span; c'tor dependency

  ConcurrentMarkSweepThread* _cmsThread;  // the thread doing the work
  ModUnionClosure    _modUnionClosure;
  ModUnionClosurePar _modUnionClosurePar;

  // CMS abstract state machine
  // initial_state: Idling
  // next_state(Idling)            = {Marking}
  // next_state(Marking)           = {Precleaning, Sweeping}
  // next_state(Precleaning)       = {AbortablePreclean, FinalMarking}
  // next_state(AbortablePreclean) = {FinalMarking}
  // next_state(FinalMarking)      = {Sweeping}
  // next_state(Sweeping)          = {Resizing}
  // next_state(Resizing)          = {Resetting}
  // next_state(Resetting)         = {Idling}
  // The numeric values below are chosen so that:
  // . _collectorState <= Idling ==  post-sweep && pre-mark
  // . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
  //                                            precleaning || abortablePreclean
 public:
  enum CollectorState {
    Resizing          = 0,
    Resetting         = 1,
    Idling            = 2,
    InitialMarking    = 3,
    Marking           = 4,
    Precleaning       = 5,
    AbortablePreclean = 6,
    FinalMarking      = 7,
    Sweeping          = 8
  };
 protected:
  static CollectorState _collectorState;

  // State related to prologue/epilogue invocation for my generations
  bool _between_prologue_and_epilogue;

  // Signalling/State related to coordination between fore- and background GC
  // Note: When the baton has been passed from background GC to foreground GC,
  // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
  static bool _foregroundGCIsActive;    // true iff foreground collector is active or
                                        // wants to go active
  static bool _foregroundGCShouldWait;  // true iff background GC is active and has not
                                        // yet passed the baton to the foreground GC

  // Support for CMSScheduleRemark (abortable preclean)
  bool _abort_preclean;
  bool _start_sampling;

  int    _numYields;
  size_t _numDirtyCards;
  size_t _sweep_count;
  // number of full gc's since the last concurrent gc.
  uint   _full_gcs_since_conc_gc;

  // occupancy used for bootstrapping stats
  double _bootstrap_occupancy;

  // timer
  elapsedTimer _timer;

  // Timing, allocation and promotion statistics, used for scheduling.
  CMSStats _stats;

  // Allocation limits installed in the young gen, used only in
  // CMSIncrementalMode.  When an allocation in the young gen would cross one of
  // these limits, the cms generation is notified and the cms thread is started
  // or stopped, respectively.
  HeapWord* _icms_start_limit;
  HeapWord* _icms_stop_limit;

  enum CMS_op_type {
    CMS_op_checkpointRootsInitial,
    CMS_op_checkpointRootsFinal
  };

  void do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause);
  bool stop_world_and_do(CMS_op_type op);

  OopTaskQueueSet* task_queues()    { return _task_queues; }
  int*             hash_seed(int i) { return &_hash_seed[i]; }
  YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; }

  // Support for parallelizing Eden rescan in CMS remark phase
  void sample_eden(); // ... sample Eden space top

 private:
  // Support for parallelizing young gen rescan in CMS remark phase
  Generation* _young_gen;           // the younger gen
  HeapWord**  _top_addr;            // ... Top of Eden
  HeapWord**  _end_addr;            // ... End of Eden
  Mutex*      _eden_chunk_lock;
  HeapWord**  _eden_chunk_array;    // ... Eden partitioning array
  size_t      _eden_chunk_index;    // ... top (exclusive) of array
  size_t      _eden_chunk_capacity; // ... max entries in array

  // Support for parallelizing survivor space rescan
  HeapWord**  _survivor_chunk_array;
  size_t      _survivor_chunk_index;
  size_t      _survivor_chunk_capacity;
  size_t*     _cursor;
  ChunkArray* _survivor_plab_array;

  // Support for marking stack overflow handling
  bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
  bool par_take_from_overflow_list(size_t num,
                                   OopTaskQueue* to_work_q,
                                   int no_of_gc_threads);
  void push_on_overflow_list(oop p);
  void par_push_on_overflow_list(oop p);
  // the following is, obviously, not, in general, "MT-stable"
  bool overflow_list_is_empty() const;

  void preserve_mark_if_necessary(oop p);
  void par_preserve_mark_if_necessary(oop p);
  void preserve_mark_work(oop p, markOop m);
  void restore_preserved_marks_if_any();
  NOT_PRODUCT(bool no_preserved_marks() const;)
  // in support of testing overflow code
  NOT_PRODUCT(int _overflow_counter;)
  NOT_PRODUCT(bool simulate_overflow();)     // sequential
  NOT_PRODUCT(bool par_simulate_overflow();) // MT version

  // CMS work methods
  void checkpointRootsInitialWork(bool asynch); // initial checkpoint work

  // a return value of false indicates failure due to stack overflow
  bool markFromRootsWork(bool asynch); // concurrent marking work

 public:   // FIX ME!!! only for testing
  bool do_marking_st(bool asynch);     // single-threaded marking
  bool do_marking_mt(bool asynch);     // multi-threaded  marking

 private:

  // concurrent precleaning work
  size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
                                  ScanMarkedObjectsAgainCarefullyClosure* cl);
  size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
                             ScanMarkedObjectsAgainCarefullyClosure* cl);
  // Does precleaning work, returning a quantity indicative of
  // the amount of "useful work" done.
  size_t preclean_work(bool clean_refs, bool clean_survivors);
  void abortable_preclean(); // Preclean while looking for possible abort
  void initialize_sequential_subtasks_for_young_gen_rescan(int i);
  // Helper function for above; merge-sorts the per-thread plab samples
  void merge_survivor_plab_arrays(ContiguousSpace* surv, int no_of_gc_threads);
  // Resets (i.e. clears) the per-thread plab sample vectors
  void reset_survivor_plab_arrays();

  // final (second) checkpoint work
  void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
                                bool init_mark_was_synchronous);
  // work routine for parallel version of remark
  void do_remark_parallel();
  // work routine for non-parallel version of remark
  void do_remark_non_parallel();
  // reference processing work routine (during second checkpoint)
  void refProcessingWork(bool asynch, bool clear_all_soft_refs);

  // concurrent sweeping work
  void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);

  // (concurrent) resetting of support data structures
  void reset(bool asynch);

  // Clear _expansion_cause fields of constituent generations
  void clear_expansion_cause();

  // An auxiliary method used to record the ends of
  // used regions of each generation to limit the extent of sweep
  void save_sweep_limits();

  // Resize the generations included in the collector.
  void compute_new_size();

  // A work method used by foreground collection to determine
  // what type of collection (compacting or not, continuing or fresh)
  // it should do.
  void decide_foreground_collection_type(bool clear_all_soft_refs,
                                         bool* should_compact, bool* should_start_over);

  // A work method used by the foreground collector to do
  // a mark-sweep-compact.
  void do_compaction_work(bool clear_all_soft_refs);

  // A work method used by the foreground collector to do
  // a mark-sweep, after taking over from a possibly on-going
  // concurrent mark-sweep collection.
  void do_mark_sweep_work(bool clear_all_soft_refs,
                          CollectorState first_state, bool should_start_over);

  // Work methods for reporting concurrent mode interruption or failure
  bool is_external_interruption();
  void report_concurrent_mode_interruption();

  // If the background GC is active, acquire control from the background
  // GC and do the collection.
  void acquire_control_and_collect(bool full, bool clear_all_soft_refs);

  // For synchronizing passing of control from background to foreground
  // GC.  waitForForegroundGC() is called by the background
  // collector.  If it had to wait for a foreground collection,
  // it returns true and the background collection should assume
  // that the collection was finished by the foreground
  // collector.
  bool waitForForegroundGC();

  // Incremental mode triggering:  recompute the icms duty cycle and set the
  // allocation limits in the young gen.
  void icms_update_allocation_limits();

  size_t block_size_using_printezis_bits(HeapWord* addr) const;
  size_t block_size_if_printezis_bits(HeapWord* addr) const;
  HeapWord* next_card_start_after_block(HeapWord* addr) const;

  void setup_cms_unloading_and_verification_state();
 public:
  CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
               ConcurrentMarkSweepGeneration* permGen,
               CardTableRS*                   ct,
               ConcurrentMarkSweepPolicy*     cp);
  ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }

  ReferenceProcessor* ref_processor() { return _ref_processor; }
  void ref_processor_init();

  Mutex* bitMapLock() const { return _markBitMap.lock(); }
  static CollectorState abstract_state() { return _collectorState; }

  bool should_abort_preclean() const; // Whether preclean should be aborted.
  size_t get_eden_used() const;
  size_t get_eden_capacity() const;

  ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }

  // locking checks
  NOT_PRODUCT(static bool have_cms_token();)

  // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
  bool shouldConcurrentCollect();

  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);
  void collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause);
  void collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause);

  // In support of ExplicitGCInvokesConcurrent
  static void request_full_gc(unsigned int full_gc_count, GCCause::Cause cause);
  // Should we unload classes in a particular concurrent cycle?
  bool should_unload_classes() const {
    return _should_unload_classes;
  }
  bool update_should_unload_classes();

  void direct_allocated(HeapWord* start, size_t size);

  // Object is dead if not marked and current phase is sweeping.
  bool is_dead_obj(oop obj) const;

  // After a promotion (of "start"), do any necessary marking.
  // If "par", then it's being done by a parallel GC thread.
  // The last two args indicate if we need precise marking
  // and if so the size of the object so it can be dirtied
  // in its entirety.
  void promoted(bool par, HeapWord* start,
                bool is_obj_array, size_t obj_size);

  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                     size_t word_size);

  void getFreelistLocks() const;
  void releaseFreelistLocks() const;
  bool haveFreelistLocks() const;

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_epilogue(bool full);

  jlong time_of_last_gc(jlong now) {
    if (_collectorState <= Idling) {
      // gc not in progress
      return _time_of_last_gc;
    } else {
      // collection in progress
      return now;
    }
  }

  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num);
  void sample_eden_chunk();

  CMSBitMap* markBitMap() { return &_markBitMap; }
  void directAllocated(HeapWord* start, size_t size);

  // main CMS steps and related support
  void checkpointRootsInitial(bool asynch);
  bool markFromRoots(bool asynch); // a return value of false indicates failure
                                   // due to stack overflow
  void preclean();
  void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs,
                            bool init_mark_was_synchronous);
  void sweep(bool asynch);

  // Check that the currently executing thread is the expected
  // one (foreground collector or background collector).
  static void check_correct_thread_executing() PRODUCT_RETURN;
  // XXXPERM void print_statistics() PRODUCT_RETURN;

  bool is_cms_reachable(HeapWord* addr);

  // Performance Counter Support
  CollectorCounters* counters() { return _gc_counters; }

  // timer stuff
  void   startTimer() { assert(!_timer.is_active(), "Error"); _timer.start();  }
  void   stopTimer()  { assert( _timer.is_active(), "Error"); _timer.stop();   }
  void   resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset();  }
  double timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); }

  int  yields()          { return _numYields; }
  void resetYields()     { _numYields = 0;    }
  void incrementYields() { _numYields++;      }
  void resetNumDirtyCards()               { _numDirtyCards = 0; }
  void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; }
  size_t numDirtyCards()                  { return _numDirtyCards; }

  static bool foregroundGCShouldWait() { return _foregroundGCShouldWait; }
  static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; }
  static bool foregroundGCIsActive() { return _foregroundGCIsActive; }
  static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; }
  size_t sweep_count() const     { return _sweep_count; }
  void   increment_sweep_count() { _sweep_count++; }

  // Timers/stats for gc scheduling and incremental mode pacing.
  CMSStats& stats() { return _stats; }

  // Convenience methods that check whether CMSIncrementalMode is enabled and
  // forward to the corresponding methods in ConcurrentMarkSweepThread.
  static void start_icms();
  static void stop_icms();    // Called at the end of the cms cycle.
  static void disable_icms(); // Called before a foreground collection.
  static void enable_icms();  // Called after a foreground collection.
  void icms_wait();           // Called at yield points.
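  // Illustrative pattern (a sketch, not a prescribed protocol): per the
  // CMSStats comment above, the cms timer must be stopped/started around
  // yield points, so the cms thread brackets concurrent work roughly as:
  //   startTimer();
  //   ... do a bounded amount of concurrent work ...
  //   stopTimer();        // timer must be inactive across the yield
  //   incrementYields();
  //   icms_wait();        // honor the icms duty cycle, if enabled
  //   startTimer();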
  // Adaptive size policy
  CMSAdaptiveSizePolicy* size_policy();
  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();

  // debugging
  void verify();
  bool verify_after_remark();
  void verify_ok_to_terminate() const PRODUCT_RETURN;
  void verify_work_stacks_empty() const PRODUCT_RETURN;
  void verify_overflow_empty() const PRODUCT_RETURN;

  // convenience methods in support of debugging
  static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
  HeapWord* block_start(const void* p) const PRODUCT_RETURN0;

  // accessors
  CMSMarkStack* verification_mark_stack() { return &_markStack; }
  CMSBitMap*    verification_mark_bm()    { return &_verification_mark_bm; }

  // Get the bit map with the perm gen "deadness" information.
  CMSBitMap* perm_gen_verify_bit_map() { return &_perm_gen_verify_bit_map; }

  // Initialization errors
  bool completed_initialization() { return _completed_initialization; }

  void print_eden_and_survivor_chunk_arrays();
};

class CMSExpansionCause : public AllStatic  {
 public:
  enum Cause {
    _no_expansion,
    _satisfy_free_ratio,
    _satisfy_promotion,
    _satisfy_allocation,
    _allocate_par_lab,
    _allocate_par_spooling_space,
    _adaptive_size_policy
  };
  // Return a string describing the cause of the expansion.
  static const char* to_string(CMSExpansionCause::Cause cause);
};

class ConcurrentMarkSweepGeneration: public CardGeneration {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweep;
  friend class CMSCollector;
 protected:
  static CMSCollector*      _collector; // the collector that collects us
  CompactibleFreeListSpace* _cmsSpace;  // underlying space (only one for now)

  // Performance Counters
  GenerationCounters* _gen_counters;
  GSpaceCounters*     _space_counters;

  // Words directly allocated, used by CMSStats.
  size_t _direct_allocated_words;

  // Non-product stat counters
  NOT_PRODUCT(
    size_t _numObjectsPromoted;
    size_t _numWordsPromoted;
    size_t _numObjectsAllocated;
    size_t _numWordsAllocated;
  )

  // Used for sizing decisions
  bool _incremental_collection_failed;
  bool incremental_collection_failed() {
    return _incremental_collection_failed;
  }
  void set_incremental_collection_failed() {
    _incremental_collection_failed = true;
  }
  void clear_incremental_collection_failed() {
    _incremental_collection_failed = false;
  }

  // accessors
  void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
  CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; }

 private:
  // For parallel young-gen GC support.
  CMSParGCThreadState** _par_gc_thread_states;

  // Reason generation was expanded
  CMSExpansionCause::Cause _expansion_cause;

  // In support of MinChunkSize being larger than min object size
  const double _dilatation_factor;

  enum CollectionTypes {
    Concurrent_collection_type     = 0,
    MS_foreground_collection_type  = 1,
    MSC_foreground_collection_type = 2,
    Unknown_collection_type        = 3
  };

  CollectionTypes _debug_collection_type;

  // Fraction of current occupancy at which to start a CMS collection which
  // will collect this generation (at least).
  double _initiating_occupancy;

 protected:
  // Shrink generation by specified size (returns false if unable to shrink)
  virtual void shrink_by(size_t bytes);

  // Update statistics for GC
  virtual void update_gc_stats(int level, bool full);

  // Maximum available space in the generation (including uncommitted)
  // space.
  size_t max_available() const;

  // getter and initializer for _initiating_occupancy field.
  double initiating_occupancy() const { return _initiating_occupancy; }
  void   init_initiating_occupancy(intx io, intx tr);

 public:
  ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                int level, CardTableRS* ct,
                                bool use_adaptive_freelists,
                                FreeBlockDictionary<FreeChunk>::DictionaryChoice);

  // Accessors
  CMSCollector* collector() const { return _collector; }
  static void set_collector(CMSCollector* collector) {
    assert(_collector == NULL, "already set");
    _collector = collector;
  }
  CompactibleFreeListSpace* cmsSpace() const { return _cmsSpace; }

  Mutex* freelistLock() const;

  virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }

  // Adaptive size policy
  CMSAdaptiveSizePolicy* size_policy();

  bool refs_discovery_is_atomic() const { return false; }
  bool refs_discovery_is_mt()     const {
    // Note: CMS does MT-discovery during the parallel-remark
    // phases. Use ReferenceProcessorMTMutator to make refs
    // discovery MT-safe during such phases or other parallel
    // discovery phases in the future. This may all go away
    // if/when we decide that refs discovery is sufficiently
    // rare that the cost of the CAS's involved is in the
    // noise. That's a measurement that should be done, and
    // the code simplified if that turns out to be the case.
    return ConcGCThreads > 1;
  }

  // Override
  virtual void ref_processor_init();

  // Grow generation by specified size (returns false if unable to grow)
  bool grow_by(size_t bytes);
  // Grow generation to reserved size.
  bool grow_to_reserved();

  void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;
  double occupancy() const { return ((double)used())/((double)capacity()); }
  size_t contiguous_available() const;
  size_t unsafe_max_alloc_nogc() const;

  // over-rides
  MemRegion used_region() const;
  MemRegion used_region_at_save_marks() const;

  // Does a "full" (forced) collection invoked on this generation collect
  // all younger generations as well? Note that the second conjunct is a
  // hack to allow the collection of the younger gen first if the flag is
  // set. This is better than using the policy's should_collect_gen0_first()
  // since that causes us to do an extra unnecessary pair of restart-&-stop-world.
  virtual bool full_collects_younger_generations() const {
    return UseCMSCompactAtFullCollection && !CollectGen0First;
  }

  void space_iterate(SpaceClosure* blk, bool usedOnly = false);

  // Support for compaction
  CompactibleSpace* first_compaction_space() const;
  // Adjust quantities in the generation affected by
  // the compaction.
  void reset_after_compaction();

  // Allocation support
  HeapWord* allocate(size_t size, bool tlab);
  HeapWord* have_lock_and_allocate(size_t size, bool tlab);
  oop       promote(oop obj, size_t obj_size);
  HeapWord* par_allocate(size_t size, bool tlab) {
    return allocate(size, tlab);
  }

  // Incremental mode triggering.
  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                     size_t word_size);

  // Used by CMSStats to track direct allocation.  The value is sampled and
  // reset after each young gen collection.
  size_t direct_allocated_words() const { return _direct_allocated_words; }
  void reset_direct_allocated_words()   { _direct_allocated_words = 0; }

  // Overrides for parallel promotion.
  virtual oop par_promote(int thread_num,
                          oop obj, markOop m, size_t word_sz);
  // This one should not be called for CMS.
  virtual void par_promote_alloc_undo(int thread_num,
                                      HeapWord* obj, size_t word_sz);
  virtual void par_promote_alloc_done(int thread_num);
  virtual void par_oop_since_save_marks_iterate_done(int thread_num);

  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const;

  // Inform this (non-young) generation that a promotion failure was
  // encountered during a collection of a younger generation that
  // promotes into this generation.
  virtual void promotion_failure_occurred();

  bool should_collect(bool full, size_t size, bool tlab);
  virtual bool should_concurrent_collect() const;
  virtual bool is_too_full() const;
  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);

  HeapWord* expand_and_allocate(size_t word_size,
                                bool tlab,
                                bool parallel = false);

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_prologue_work(bool full, bool registerClosure,
                        ModUnionClosure* modUnionClosure);
  void gc_epilogue(bool full);
  void gc_epilogue_work(bool full);

  // Time since last GC of this generation
  jlong time_of_last_gc(jlong now) {
    return collector()->time_of_last_gc(now);
  }
  void update_time_of_last_gc(jlong now) {
    collector()->update_time_of_last_gc(now);
  }

  // Allocation failure
  void expand(size_t bytes, size_t expand_bytes,
              CMSExpansionCause::Cause cause);
  virtual bool expand(size_t bytes, size_t expand_bytes);
  void shrink(size_t bytes);
  HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
  bool expand_and_ensure_spooling_space(PromotionInfo* promo);

  // Iteration support and related enquiries
  void save_marks();
  bool no_allocs_since_save_marks();
  void object_iterate_since_last_GC(ObjectClosure* cl);
  void younger_refs_iterate(OopsInGenClosure* cl);

  // Iteration support specific to CMS generations
  void save_sweep_limit();

  // More iteration support
  virtual void oop_iterate(MemRegion mr, OopClosure* cl);
  virtual void oop_iterate(OopClosure* cl);
  virtual void safe_object_iterate(ObjectClosure* cl);
  virtual void object_iterate(ObjectClosure* cl);

  // Need to declare the full complement of closures, whether we'll
  // override them or not, or get a message from the compiler:
  // oop_since_save_marks_iterate_nv hides virtual function...
#define CMS_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DECL)

  // Smart allocation  XXX -- move to CFLSpace?
  void setNearLargestChunk();
  bool isNearLargestChunk(HeapWord* addr);

  // Get the chunk at the end of the space.  Delegates to
  // the space.
  FreeChunk* find_chunk_at_end();

  // Overriding of unused functionality (sharing not yet supported with CMS)
  void pre_adjust_pointers();
  void post_compact();

  // Debugging
  void prepare_for_verify();
  void verify();
  void print_statistics() PRODUCT_RETURN;

  // Performance Counters support
  virtual void update_counters();
  virtual void update_counters(size_t used);
  void initialize_performance_counters();
  CollectorCounters* counters() { return collector()->counters(); }

  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num) {
    // Delegate to collector
    return collector()->get_data_recorder(thr_num);
  }
  void sample_eden_chunk() {
    // Delegate to collector
    return collector()->sample_eden_chunk();
  }

  // Printing
  const char* name() const;
  virtual const char* short_name() const { return "CMS"; }
  void        print() const;
  void printOccupancy(const char* s);
  bool must_be_youngest() const { return false; }
  bool must_be_oldest()   const { return true; }

  void compute_new_size();

  CollectionTypes debug_collection_type() { return _debug_collection_type; }
  void rotate_debug_collection_type();
};

class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration {

  // Return the size policy from the heap's collector
  // policy casted to CMSAdaptiveSizePolicy*.
  CMSAdaptiveSizePolicy* cms_size_policy() const;

  // Resize the generation based on the adaptive size
  // policy.
  void resize(size_t cur_promo, size_t desired_promo);

  // Return the GC counters from the collector policy
  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();

  virtual void shrink_by(size_t bytes);

 public:
  virtual void compute_new_size();
  ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                  int level, CardTableRS* ct,
                                  bool use_adaptive_freelists,
                                  FreeBlockDictionary<FreeChunk>::DictionaryChoice
                                    dictionaryChoice) :
    ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct,
      use_adaptive_freelists, dictionaryChoice) {}

  virtual const char* short_name() const { return "ASCMS"; }
  virtual Generation::Name kind() { return Generation::ASConcurrentMarkSweep; }

  virtual void update_counters();
  virtual void update_counters(size_t used);
};

//
// Closures of various sorts used by CMS to accomplish its work
//

// This closure is used to check that a certain set of oops is empty.
class FalseClosure: public OopClosure {
 public:
  void do_oop(oop* p)       { guarantee(false, "Should be an empty set"); }
  void do_oop(narrowOop* p) { guarantee(false, "Should be an empty set"); }
};

// This closure is used to do concurrent marking from the roots
// following the first checkpoint.
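// Illustrative note (an assumption from the BitMapClosure interface, not
// spelled out here): do_bit() is invoked for each set mark bit handed out
// by CMSBitMap::iterate(); _finger tracks the scan position so that, after
// a yield or a marking-stack overflow, reset(addr) can restart the scan
// from a known point.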
class MarkFromRootsClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _bitMap;
  CMSBitMap*     _mut;
  CMSMarkStack*  _markStack;
  CMSMarkStack*  _revisitStack;
  bool           _yield;
  int            _skipBits;
  HeapWord*      _finger;
  HeapWord*      _threshold;
  DEBUG_ONLY(bool _verifying;)

 public:
  MarkFromRootsClosure(CMSCollector* collector, MemRegion span,
                       CMSBitMap* bitMap,
                       CMSMarkStack* markStack,
                       CMSMarkStack* revisitStack,
                       bool should_yield, bool verifying = false);
  bool do_bit(size_t offset);
  void reset(HeapWord* addr);
  inline void do_yield_check();

 private:
  void scanOopsInOop(HeapWord* ptr);
  void do_yield_work();
};

// This closure is used to do concurrent multi-threaded
// marking from the roots following the first checkpoint.
// XXX This should really be a subclass of the serial version
// above, but I have not had the time to refactor things cleanly.
// That will be done for Dolphin.
class Par_MarkFromRootsClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _whole_span;
  MemRegion      _span;
  CMSBitMap*     _bit_map;
  CMSBitMap*     _mut;
  OopTaskQueue*  _work_queue;
  CMSMarkStack*  _overflow_stack;
  CMSMarkStack*  _revisit_stack;
  bool           _yield;
  int            _skip_bits;
  HeapWord*      _finger;
  HeapWord*      _threshold;
  CMSConcMarkingTask* _task;
 public:
  Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
                           MemRegion span,
                           CMSBitMap* bit_map,
                           OopTaskQueue* work_queue,
                           CMSMarkStack* overflow_stack,
                           CMSMarkStack* revisit_stack,
                           bool should_yield);
  bool do_bit(size_t offset);
  inline void do_yield_check();

 private:
  void scan_oops_in_oop(HeapWord* ptr);
  void do_yield_work();
  bool get_work_from_overflow_stack();
};

// The following closures are used to do certain kinds of verification of
// CMS marking.
class PushAndMarkVerifyClosure: public OopClosure {
  CMSCollector*    _collector;
  MemRegion        _span;
  CMSBitMap*       _verification_bm;
  CMSBitMap*       _cms_bm;
  CMSMarkStack*    _mark_stack;
 protected:
  void do_oop(oop p);
  template <class T> inline void do_oop_work(T *p) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    do_oop(obj);
  }
 public:
  PushAndMarkVerifyClosure(CMSCollector* cms_collector,
                           MemRegion span,
                           CMSBitMap* verification_bm,
                           CMSBitMap* cms_bm,
                           CMSMarkStack* mark_stack);
  void do_oop(oop* p);
  void do_oop(narrowOop* p);
  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
};

class MarkFromRootsVerifyClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _verification_bm;
  CMSBitMap*     _cms_bm;
  CMSMarkStack*  _mark_stack;
  HeapWord*      _finger;
  PushAndMarkVerifyClosure _pam_verify_closure;
 public:
  MarkFromRootsVerifyClosure(CMSCollector* collector, MemRegion span,
                             CMSBitMap* verification_bm,
                             CMSBitMap* cms_bm,
                             CMSMarkStack* mark_stack);
  bool do_bit(size_t offset);
  void reset(HeapWord* addr);
};


// This closure is used to check that a certain set of bits is
// "empty" (i.e. the bit vector doesn't have any 1-bits).
class FalseBitMapClosure: public BitMapClosure {
 public:
  bool do_bit(size_t offset) {
    guarantee(false, "Should not have a 1 bit");
    return true;
  }
};

// This closure is used during the second checkpointing phase
// to rescan the marked objects on the dirty cards in the mod
// union table and the card table proper. It's invoked via
// MarkFromDirtyCardsClosure below. It uses either
// [Par_]MarkRefsIntoAndScanClosure (Par_ in the parallel case)
// declared in genOopClosures.hpp to accomplish some of its work.
// In the parallel case the bitMap is shared, so access to
// it needs to be suitably synchronized for updates by embedded
// closures that update it; however, this closure itself only
// reads the bit_map and because it is idempotent, is immune to
// reading stale values.
class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure {
 #ifdef ASSERT
  CMSCollector*   _collector;
  MemRegion       _span;
  union {
    CMSMarkStack* _mark_stack;
    OopTaskQueue* _work_queue;
  };
 #endif // ASSERT
  bool            _parallel;
  CMSBitMap*      _bit_map;
  union {
    MarkRefsIntoAndScanClosure*     _scan_closure;
    Par_MarkRefsIntoAndScanClosure* _par_scan_closure;
  };

 public:
  ScanMarkedObjectsAgainClosure(CMSCollector* collector,
                                MemRegion span,
                                ReferenceProcessor* rp,
                                CMSBitMap* bit_map,
                                CMSMarkStack* mark_stack,
                                CMSMarkStack* revisit_stack,
                                MarkRefsIntoAndScanClosure* cl):
    #ifdef ASSERT
      _collector(collector),
      _span(span),
      _mark_stack(mark_stack),
    #endif // ASSERT
    _parallel(false),
    _bit_map(bit_map),
    _scan_closure(cl) { }

  ScanMarkedObjectsAgainClosure(CMSCollector* collector,
                                MemRegion span,
                                ReferenceProcessor* rp,
                                CMSBitMap* bit_map,
                                OopTaskQueue* work_queue,
                                CMSMarkStack* revisit_stack,
                                Par_MarkRefsIntoAndScanClosure* cl):
    #ifdef ASSERT
      _collector(collector),
      _span(span),
      _work_queue(work_queue),
    #endif // ASSERT
    _parallel(true),
    _bit_map(bit_map),
    _par_scan_closure(cl) { }

  void do_object(oop obj) {
    guarantee(false, "Call do_object_b(oop, MemRegion) instead");
  }
  bool do_object_b(oop obj) {
    guarantee(false, "Call do_object_b(oop, MemRegion) form instead");
    return false;
  }
  bool do_object_bm(oop p, MemRegion mr);
};

// This closure is used during the second checkpointing phase
// to rescan the marked objects on the dirty cards in the mod
// union table and the card table proper. It invokes
// ScanMarkedObjectsAgainClosure above to accomplish much of its work.
// In the parallel case, the bit map is shared and requires
// synchronized access.
// This closure is used during the second checkpointing phase
// to rescan the marked objects on the dirty cards in the mod
// union table and the card table proper. It invokes
// ScanMarkedObjectsAgainClosure above to accomplish much of its work.
// In the parallel case, the bit map is shared and requires
// synchronized access.
class MarkFromDirtyCardsClosure: public MemRegionClosure {
  CompactibleFreeListSpace*      _space;
  ScanMarkedObjectsAgainClosure  _scan_cl;
  size_t                         _num_dirty_cards;

 public:
  MarkFromDirtyCardsClosure(CMSCollector* collector,
                            MemRegion span,
                            CompactibleFreeListSpace* space,
                            CMSBitMap* bit_map,
                            CMSMarkStack* mark_stack,
                            CMSMarkStack* revisit_stack,
                            MarkRefsIntoAndScanClosure* cl):
    _space(space),
    _num_dirty_cards(0),
    _scan_cl(collector, span, collector->ref_processor(), bit_map,
             mark_stack, revisit_stack, cl) { }

  MarkFromDirtyCardsClosure(CMSCollector* collector,
                            MemRegion span,
                            CompactibleFreeListSpace* space,
                            CMSBitMap* bit_map,
                            OopTaskQueue* work_queue,
                            CMSMarkStack* revisit_stack,
                            Par_MarkRefsIntoAndScanClosure* cl):
    _space(space),
    _num_dirty_cards(0),
    _scan_cl(collector, span, collector->ref_processor(), bit_map,
             work_queue, revisit_stack, cl) { }

  void do_MemRegion(MemRegion mr);
  void set_space(CompactibleFreeListSpace* space) { _space = space; }
  size_t num_dirty_cards() { return _num_dirty_cards; }
};

// This closure is used in the non-product build to check
// that there are no MemRegions with a certain property.
class FalseMemRegionClosure: public MemRegionClosure {
  void do_MemRegion(MemRegion mr) {
    guarantee(!mr.is_empty(), "Shouldn't be empty");
    guarantee(false, "Should never be here");
  }
};

// This closure is used during the precleaning phase
// to "carefully" rescan marked objects on dirty cards.
// It uses MarkRefsIntoAndScanClosure declared in genOopClosures.hpp
// to accomplish some of its work.
class ScanMarkedObjectsAgainCarefullyClosure: public ObjectClosureCareful {
  CMSCollector*                  _collector;
  MemRegion                      _span;
  bool                           _yield;
  Mutex*                         _freelistLock;
  CMSBitMap*                     _bitMap;
  CMSMarkStack*                  _markStack;
  MarkRefsIntoAndScanClosure*    _scanningClosure;

 public:
  ScanMarkedObjectsAgainCarefullyClosure(CMSCollector* collector,
                                         MemRegion span,
                                         CMSBitMap* bitMap,
                                         CMSMarkStack* markStack,
                                         CMSMarkStack* revisitStack,
                                         MarkRefsIntoAndScanClosure* cl,
                                         bool should_yield):
    _collector(collector),
    _span(span),
    _yield(should_yield),
    _bitMap(bitMap),
    _markStack(markStack),
    _scanningClosure(cl) {
  }

  void do_object(oop p) {
    guarantee(false, "call do_object_careful instead");
  }

  size_t do_object_careful(oop p) {
    guarantee(false, "Unexpected caller");
    return 0;
  }

  size_t do_object_careful_m(oop p, MemRegion mr);

  void setFreelistLock(Mutex* m) {
    _freelistLock = m;
    _scanningClosure->set_freelistLock(m);
  }

 private:
  inline bool do_yield_check();

  void do_yield_work();
};
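
// ---------------------------------------------------------------------------
// Illustrative sketch only (not part of this header): how a caller might
// drive the "careful" protocol above. preclean_region is a hypothetical
// function; the convention it relies on -- do_object_careful_m() returning
// the object's size on success and 0 when the object could not be safely
// scanned -- is the one the closure above declares.
#if 0
size_t preclean_region(HeapWord* bottom, HeapWord* top,
                       ScanMarkedObjectsAgainCarefullyClosure* cl) {
  HeapWord* addr = bottom;
  while (addr < top) {
    size_t sz = cl->do_object_careful_m(oop(addr), MemRegion(addr, top));
    if (sz == 0) {
      break;            // not safely parsable now; retry in a later pass
    }
    addr += sz;         // advance past the object just scanned
  }
  return pointer_delta(addr, bottom);  // words successfully precleaned
}
#endif
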
class SurvivorSpacePrecleanClosure: public ObjectClosureCareful {
  CMSCollector*        _collector;
  MemRegion            _span;
  bool                 _yield;
  CMSBitMap*           _bit_map;
  CMSMarkStack*        _mark_stack;
  PushAndMarkClosure*  _scanning_closure;
  unsigned int         _before_count;

 public:
  SurvivorSpacePrecleanClosure(CMSCollector* collector,
                               MemRegion span,
                               CMSBitMap* bit_map,
                               CMSMarkStack* mark_stack,
                               PushAndMarkClosure* cl,
                               unsigned int before_count,
                               bool should_yield):
    _collector(collector),
    _span(span),
    _yield(should_yield),
    _bit_map(bit_map),
    _mark_stack(mark_stack),
    _scanning_closure(cl),
    _before_count(before_count)
  { }

  void do_object(oop p) {
    guarantee(false, "call do_object_careful instead");
  }

  size_t do_object_careful(oop p);

  size_t do_object_careful_m(oop p, MemRegion mr) {
    guarantee(false, "Unexpected caller");
    return 0;
  }

 private:
  inline void do_yield_check();
  void do_yield_work();
};
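
// ---------------------------------------------------------------------------
// Illustrative sketch only (not part of this header): a standalone model of
// the do_yield_check()/do_yield_work() protocol shared by the concurrent
// closures above. MiniYielder and its members are hypothetical; the real
// code checks whether the foreground collector wants the CPU or the locks
// (and only when _yield is true), dropping the bit-map and freelist locks
// before reacquiring them.
#if 0
#include <atomic>
#include <mutex>

struct MiniYielder {
  std::atomic<bool> yield_requested;
  std::mutex        heap_lock;

  void do_work_with_yields() {
    std::unique_lock<std::mutex> ml(heap_lock);
    for (int unit = 0; unit < 1000; unit++) {
      // ... one small unit of precleaning work under the lock ...
      if (yield_requested.load()) {  // analogous to do_yield_check()
        ml.unlock();                 // let the foreground collector run
        ml.lock();                   // then reacquire and continue
      }
    }
  }
};
#endif
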
// This closure is used to accomplish the sweeping work
// after the second checkpoint but before the concurrent reset
// phase.
//
// Terminology
//   left hand chunk (LHC) - block of one or more chunks currently being
//     coalesced. The LHC is available for coalescing with a new chunk.
//   right hand chunk (RHC) - block that is currently being swept that is
//     free or garbage that can be coalesced with the LHC.
// _inFreeRange is true if there is currently a LHC
// _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk.
// _freeRangeInFreeLists is true if the LHC is in the free lists.
// _freeFinger is the address of the current LHC
class SweepClosure: public BlkClosureCareful {
  CMSCollector*                  _collector;  // collector doing the work
  ConcurrentMarkSweepGeneration* _g;          // Generation being swept
  CompactibleFreeListSpace*      _sp;         // Space being swept
  HeapWord*                      _limit;      // the address at or above which the
                                              // sweep should stop because we do not
                                              // expect blocks that are newly garbage
                                              // (and hence eligible for sweeping)
                                              // past that address.
  Mutex*                         _freelistLock;  // Free list lock (in space)
  CMSBitMap*                     _bitMap;        // Marking bit map (in
                                                 // generation)
  bool                           _inFreeRange;   // Indicates if we are in the
                                                 // midst of a free run
  bool                           _freeRangeInFreeLists;
                                     // Often, we have just found
                                     // a free chunk and started
                                     // a new free range; we do not
                                     // eagerly remove this chunk from
                                     // the free lists unless there is
                                     // a possibility of coalescing.
                                     // When true, this flag indicates
                                     // that the _freeFinger below
                                     // points to a potentially free chunk
                                     // that may still be in the free lists
  bool                           _lastFreeRangeCoalesced;
                                     // free range contains chunks
                                     // coalesced
  bool                           _yield;
                                     // Whether sweeping should be
                                     // done with yields. For instance
                                     // when done by the foreground
                                     // collector we shouldn't yield.
  HeapWord*                      _freeFinger;  // When _inFreeRange is set, the
                                               // pointer to the "left hand
                                               // chunk"
  size_t                         _freeRangeSize;
                                     // When _inFreeRange is set, this
                                     // indicates the accumulated size
                                     // of the "left hand chunk"
  NOT_PRODUCT(
    size_t                       _numObjectsFreed;
    size_t                       _numWordsFreed;
    size_t                       _numObjectsLive;
    size_t                       _numWordsLive;
    size_t                       _numObjectsAlreadyFree;
    size_t                       _numWordsAlreadyFree;
    FreeChunk*                   _last_fc;
  )
 private:
  // Code that is common to a free chunk or garbage when
  // encountered during sweeping.
  void do_post_free_or_garbage_chunk(FreeChunk *fc, size_t chunkSize);
  // Process a free chunk during sweeping.
  void do_already_free_chunk(FreeChunk *fc);
  // Work method called when processing an already free or a
  // freshly garbage chunk to do a lookahead and possibly a
  // preemptive flush if crossing over _limit.
  void lookahead_and_flush(FreeChunk* fc, size_t chunkSize);
  // Process a garbage chunk during sweeping.
  size_t do_garbage_chunk(FreeChunk *fc);
  // Process a live chunk during sweeping.
  size_t do_live_chunk(FreeChunk* fc);

  // Accessors.
  HeapWord* freeFinger() const            { return _freeFinger; }
  void set_freeFinger(HeapWord* v)        { _freeFinger = v; }
  bool inFreeRange() const                { return _inFreeRange; }
  void set_inFreeRange(bool v)            { _inFreeRange = v; }
  bool lastFreeRangeCoalesced() const     { return _lastFreeRangeCoalesced; }
  void set_lastFreeRangeCoalesced(bool v) { _lastFreeRangeCoalesced = v; }
  bool freeRangeInFreeLists() const       { return _freeRangeInFreeLists; }
  void set_freeRangeInFreeLists(bool v)   { _freeRangeInFreeLists = v; }

  // Initialize a free range.
  void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists);
  // Return this chunk to the free lists.
  void flush_cur_free_chunk(HeapWord* chunk, size_t size);

  // Check if we should yield and do so when necessary.
  inline void do_yield_check(HeapWord* addr);

  // Yield
  void do_yield_work(HeapWord* addr);

  // Debugging/Printing
  void print_free_block_coalesced(FreeChunk* fc) const;

 public:
  SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
               CMSBitMap* bitMap, bool should_yield);
  ~SweepClosure() PRODUCT_RETURN;

  size_t do_blk_careful(HeapWord* addr);
  void print() const { print_on(tty); }
  void print_on(outputStream* st) const;
};

// Closures related to weak references processing

// During CMS' weak reference processing, this is a
// work-routine/closure used to complete transitive
// marking of objects as live after a certain point
// in which an initial set has been completely accumulated.
// This closure is currently used both during the final
// remark stop-world phase, as well as during the concurrent
// precleaning of the discovered reference lists.
class CMSDrainMarkingStackClosure: public VoidClosure {
  CMSCollector*        _collector;
  MemRegion            _span;
  CMSMarkStack*        _mark_stack;
  CMSBitMap*           _bit_map;
  CMSKeepAliveClosure* _keep_alive;
  bool                 _concurrent_precleaning;
 public:
  CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span,
                              CMSBitMap* bit_map, CMSMarkStack* mark_stack,
                              CMSKeepAliveClosure* keep_alive,
                              bool cpc):
    _collector(collector),
    _span(span),
    _bit_map(bit_map),
    _mark_stack(mark_stack),
    _keep_alive(keep_alive),
    _concurrent_precleaning(cpc) {
    assert(_concurrent_precleaning == _keep_alive->concurrent_precleaning(),
           "Mismatch");
  }

  void do_void();
};

// A parallel version of CMSDrainMarkingStackClosure above.
class CMSParDrainMarkingStackClosure: public VoidClosure {
  CMSCollector*                 _collector;
  MemRegion                     _span;
  OopTaskQueue*                 _work_queue;
  CMSBitMap*                    _bit_map;
  CMSInnerParMarkAndPushClosure _mark_and_push;

 public:
  CMSParDrainMarkingStackClosure(CMSCollector* collector,
                                 MemRegion span, CMSBitMap* bit_map,
                                 CMSMarkStack* revisit_stack,
                                 OopTaskQueue* work_queue):
    _collector(collector),
    _span(span),
    _bit_map(bit_map),
    _work_queue(work_queue),
    _mark_and_push(collector, span, bit_map, revisit_stack, work_queue) { }

  void trim_queue(uint max);
  void do_void();
};
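
// ---------------------------------------------------------------------------
// Illustrative sketch only (not part of this header): a standalone model of
// the left-hand-chunk coalescing that SweepClosure (above) performs.
// MiniBlock and sweep are hypothetical names for this sketch; the real code
// also tracks whether the growing chunk is already in the free lists
// (_freeRangeInFreeLists) and yields periodically.
#if 0
#include <cstddef>
#include <vector>

struct MiniBlock {
  std::size_t size;  // size in words
  bool        live;  // marked live, versus free or garbage
};

// Walk the blocks left to right, coalescing consecutive dead (free or
// garbage) blocks into one growing "left hand chunk" and flushing it to
// the free lists when a live block ends the run.
void sweep(const std::vector<MiniBlock>& blocks,
           std::vector<std::size_t>& free_lists) {
  bool        in_free_range   = false;  // _inFreeRange
  std::size_t free_range_size = 0;      // accumulated size of the LHC

  for (std::size_t i = 0; i < blocks.size(); i++) {
    if (!blocks[i].live) {
      in_free_range    = true;            // start or extend the LHC
      free_range_size += blocks[i].size;  // coalesce the RHC into it
    } else if (in_free_range) {
      free_lists.push_back(free_range_size);  // cf. flush_cur_free_chunk()
      in_free_range   = false;
      free_range_size = 0;
    }
  }
  if (in_free_range) {
    free_lists.push_back(free_range_size);    // flush the trailing run
  }
}
#endif
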
// Allow yielding or short-circuiting of reference list
// precleaning work.
class CMSPrecleanRefsYieldClosure: public YieldClosure {
  CMSCollector* _collector;
  void do_yield_work();
 public:
  CMSPrecleanRefsYieldClosure(CMSCollector* collector):
    _collector(collector) {}
  virtual bool should_return();
};


// Convenience class that locks free list locks for a given CMS collector
class FreelistLocker: public StackObj {
 private:
  CMSCollector* _collector;
 public:
  FreelistLocker(CMSCollector* collector):
    _collector(collector) {
    _collector->getFreelistLocks();
  }

  ~FreelistLocker() {
    _collector->releaseFreelistLocks();
  }
};

// Mark all dead objects in a given space.
class MarkDeadObjectsClosure: public BlkClosure {
  const CMSCollector*             _collector;
  const CompactibleFreeListSpace* _sp;
  CMSBitMap*                      _live_bit_map;
  CMSBitMap*                      _dead_bit_map;
 public:
  MarkDeadObjectsClosure(const CMSCollector* collector,
                         const CompactibleFreeListSpace* sp,
                         CMSBitMap *live_bit_map,
                         CMSBitMap *dead_bit_map) :
    _collector(collector),
    _sp(sp),
    _live_bit_map(live_bit_map),
    _dead_bit_map(dead_bit_map) {}
  size_t do_blk(HeapWord* addr);
};

class TraceCMSMemoryManagerStats : public TraceMemoryManagerStats {

 public:
  TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause);
};


#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
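
// ---------------------------------------------------------------------------
// Illustrative sketch only (not part of this header, and deliberately kept
// outside the include guard under #if 0): the RAII usage pattern
// FreelistLocker is designed for. with_freelists_locked is a hypothetical
// function; the point is that the destructor releases the locks on every
// exit path, including early returns.
#if 0
void with_freelists_locked(CMSCollector* collector) {
  FreelistLocker fll(collector);  // getFreelistLocks() in the constructor
  // ... examine or update the free lists ...
  // (early returns are safe: ~FreelistLocker() runs on scope exit)
}                                 // releaseFreelistLocks() here
#endif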