/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP

#include "gc_implementation/shared/gSpaceCounters.hpp"
#include "gc_implementation/shared/gcStats.hpp"
#include "gc_implementation/shared/generationCounters.hpp"
#include "memory/freeBlockDictionary.hpp"
#include "memory/generation.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memoryService.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/taskqueue.hpp"
#include "utilities/yieldingWorkgroup.hpp"

// ConcurrentMarkSweepGeneration is in support of a concurrent
// mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Schenker
// style. We assume, for now, that this generation is always the
// seniormost generation and, for simplicity in the first implementation,
// that this generation is a single compactible space. Neither of these
// restrictions appears essential; both will be relaxed in the future when
// more time is available to implement the greater generality (and there's
// a need for it).
//
// Concurrent mode failures are currently handled by
// means of a sliding mark-compact.

class CMSAdaptiveSizePolicy;
class CMSConcMarkingTask;
class CMSGCAdaptivePolicyCounters;
class ConcurrentMarkSweepGeneration;
class ConcurrentMarkSweepPolicy;
class ConcurrentMarkSweepThread;
class CompactibleFreeListSpace;
class FreeChunk;
class PromotionInfo;
class ScanMarkedObjectsAgainCarefullyClosure;
class TenuredGeneration;

// A generic CMS bit map. It's the basis for both the CMS marking bit map
// as well as for the mod union table (in each case only a subset of the
// methods are used). This is essentially a wrapper around the BitMap class,
// with one bit per (1<<_shifter) HeapWords. (i.e., for the marking bit map,
// we have _shifter == 0, and for the mod union table we have
// _shifter == CardTableModRefBS::card_shift - LogHeapWordSize.)
// XXX 64-bit issues in BitMap?
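// A small worked example (illustrative only, assuming a 64-bit build where
// LogHeapWordSize == 3 and CardTableModRefBS::card_shift == 9, i.e.
// 512-byte cards):
//   marking bit map:  _shifter == 0      => one bit per HeapWord
//   mod union table:  _shifter == 9 - 3  => one bit per 64 HeapWords,
//                     i.e. one bit per card
// The address-to-bit conversion is then, in the spirit of
// heapWordToOffset() below:
//   // size_t offset = pointer_delta(addr, _bmStartWord) >> _shifter;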
class CMSBitMap VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;

  HeapWord*    _bmStartWord;   // base address of range covered by map
  size_t       _bmWordSize;    // map size (in #HeapWords covered)
  const int    _shifter;       // shifts to convert HeapWord to bit position
  VirtualSpace _virtual_space; // underlying the bit map
  BitMap       _bm;            // the bit map itself
 public:
  Mutex* const _lock;          // mutex protecting _bm;

 public:
  // constructor
  CMSBitMap(int shifter, int mutex_rank, const char* mutex_name);

  // allocates the actual storage for the map
  bool allocate(MemRegion mr);
  // field getter
  Mutex* lock() const { return _lock; }
  // locking verifier convenience function
  void assert_locked() const PRODUCT_RETURN;

  // inquiries
  HeapWord* startWord()   const { return _bmStartWord; }
  size_t    sizeInWords() const { return _bmWordSize; }
  size_t    sizeInBits()  const { return _bm.size(); }
  // the following is one past the last word in space
  HeapWord* endWord()     const { return _bmStartWord + _bmWordSize; }

  // reading marks
  bool isMarked(HeapWord* addr) const;
  bool par_isMarked(HeapWord* addr) const; // does not do locking checks
  bool isUnmarked(HeapWord* addr) const;
  bool isAllClear() const;

  // writing marks
  void mark(HeapWord* addr);
  // For marking by parallel GC threads;
  // returns true if we did, false if another thread did
  bool par_mark(HeapWord* addr);

  void mark_range(MemRegion mr);
  void par_mark_range(MemRegion mr);
  void mark_large_range(MemRegion mr);
  void par_mark_large_range(MemRegion mr);
  void par_clear(HeapWord* addr); // For unmarking by parallel GC threads.
  void clear_range(MemRegion mr);
  void par_clear_range(MemRegion mr);
  void clear_large_range(MemRegion mr);
  void par_clear_large_range(MemRegion mr);
  void clear_all();
  void clear_all_incrementally();  // Not yet implemented!!

  NOT_PRODUCT(
    // checks the memory region for validity
    void region_invariant(MemRegion mr);
  )

  // iteration
  void iterate(BitMapClosure* cl) {
    _bm.iterate(cl);
  }
  void iterate(BitMapClosure* cl, HeapWord* left, HeapWord* right);
  void dirty_range_iterate_clear(MemRegionClosure* cl);
  void dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl);

  // auxiliary support for iteration
  HeapWord* getNextMarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextMarkedWordAddress(HeapWord* start_addr,
                                     HeapWord* end_addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* start_addr,
                                       HeapWord* end_addr) const;
  MemRegion getAndClearMarkedRegion(HeapWord* addr);
  MemRegion getAndClearMarkedRegion(HeapWord* start_addr,
                                    HeapWord* end_addr);

  // conversion utilities
  HeapWord* offsetToHeapWord(size_t offset) const;
  size_t    heapWordToOffset(HeapWord* addr) const;
  size_t    heapWordDiffToOffsetDiff(size_t diff) const;

  void print_on_error(outputStream* st, const char* prefix) const;

  // debugging
  // is this address range covered by the bit-map?
  NOT_PRODUCT(
    bool covers(MemRegion mr) const;
    bool covers(HeapWord* start, size_t size = 0) const;
  )
  void verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) PRODUCT_RETURN;
};
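// Hypothetical caller pattern (a sketch, not code from this file) for the
// parallel marking API above: par_mark() races benignly, so a thread
// claims an object for scanning only when its own CAS set the bit:
//
//   // if (bit_map->par_mark(addr)) {
//   //   work_queue->push(oop(addr));  // we marked it, so we scan it
//   // } // else some other thread marked it first; nothing to do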
// Represents a marking stack used by the CMS collector.
// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
class CMSMarkStack: public CHeapObj<mtGC> {
  //
  friend class CMSCollector;   // to get at expansion stats further below
  //

  VirtualSpace _virtual_space;  // space for the stack
  oop*   _base;      // bottom of stack
  size_t _index;     // one more than last occupied index
  size_t _capacity;  // max #elements
  Mutex  _par_lock;  // an advisory lock used in case of parallel access
  NOT_PRODUCT(size_t _max_depth;)  // max depth plumbed during run

 protected:
  size_t _hit_limit;      // we hit max stack size limit
  size_t _failed_double;  // we failed expansion before hitting limit

 public:
  CMSMarkStack():
    _par_lock(Mutex::event, "CMSMarkStack._par_lock", true),
    _hit_limit(0),
    _failed_double(0) {}

  bool allocate(size_t size);

  size_t capacity() const { return _capacity; }

  oop pop() {
    if (!isEmpty()) {
      return _base[--_index];
    }
    return NULL;
  }

  bool push(oop ptr) {
    if (isFull()) {
      return false;
    } else {
      _base[_index++] = ptr;
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
      return true;
    }
  }

  bool isEmpty() const { return _index == 0; }
  bool isFull()  const {
    assert(_index <= _capacity, "buffer overflow");
    return _index == _capacity;
  }

  size_t length() { return _index; }

  // "Parallel versions" of some of the above
  oop par_pop() {
    // lock and pop
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return pop();
  }

  bool par_push(oop ptr) {
    // lock and push
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return push(ptr);
  }

  // Forcibly reset the stack, losing all of its contents.
  void reset() {
    _index = 0;
  }

  // Expand the stack, typically in response to an overflow condition
  void expand();

  // Compute the least valued stack element.
  oop least_value(HeapWord* low) {
    oop least = (oop)low;
    for (size_t i = 0; i < _index; i++) {
      least = MIN2(least, _base[i]);
    }
    return least;
  }

  // Exposed here to allow stack expansion in the parallel case
  Mutex* par_lock() { return &_par_lock; }
};
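// Sketch (hypothetical client, not code from this file) of how an
// overflow-aware caller might drive the stack above: push() fails when
// full, after which the caller can expand() and retry, or fall back to the
// collector's global overflow list as a last resort:
//
//   // if (!mark_stack->push(obj)) {
//   //   mark_stack->expand();                       // may fail to grow further
//   //   if (!mark_stack->push(obj)) {
//   //     collector->push_on_overflow_list(obj);    // recorded for a later pass
//   //   }
//   // }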
class CardTableRS;
class CMSParGCThreadState;

class ModUnionClosure: public MemRegionClosure {
 protected:
  CMSBitMap* _t;
 public:
  ModUnionClosure(CMSBitMap* t): _t(t) { }
  void do_MemRegion(MemRegion mr);
};

class ModUnionClosurePar: public ModUnionClosure {
 public:
  ModUnionClosurePar(CMSBitMap* t): ModUnionClosure(t) { }
  void do_MemRegion(MemRegion mr);
};

// Survivor Chunk Array in support of parallelization of
// Survivor Space rescan.
class ChunkArray: public CHeapObj<mtGC> {
  size_t _index;
  size_t _capacity;
  size_t _overflows;
  HeapWord** _array;   // storage for array

 public:
  ChunkArray() : _index(0), _capacity(0), _overflows(0), _array(NULL) {}
  ChunkArray(HeapWord** a, size_t c):
    _index(0), _capacity(c), _overflows(0), _array(a) {}

  HeapWord** array() { return _array; }
  void set_array(HeapWord** a) { _array = a; }

  size_t capacity() { return _capacity; }
  void set_capacity(size_t c) { _capacity = c; }

  size_t end() {
    assert(_index <= capacity(),
           err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT "): out of bounds",
                   _index, _capacity));
    return _index;
  }  // exclusive

  HeapWord* nth(size_t n) {
    assert(n < end(), "Out of bounds access");
    return _array[n];
  }

  void reset() {
    _index = 0;
    if (_overflows > 0 && PrintCMSStatistics > 1) {
      warning("CMS: ChunkArray[" SIZE_FORMAT "] overflowed " SIZE_FORMAT " times",
              _capacity, _overflows);
    }
    _overflows = 0;
  }

  void record_sample(HeapWord* p, size_t sz) {
    // For now we do not do anything with the size
    if (_index < _capacity) {
      _array[_index++] = p;
    } else {
      ++_overflows;
      assert(_index == _capacity,
             err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT
                     "): out of bounds at overflow#" SIZE_FORMAT,
                     _index, _capacity, _overflows));
    }
  }
};

//
// Timing, allocation and promotion statistics for gc scheduling and incremental
// mode pacing.  Most statistics are exponential averages.
//
class CMSStats VALUE_OBJ_CLASS_SPEC {
 private:
  ConcurrentMarkSweepGeneration* const _cms_gen;   // The cms (old) gen.

  // The following are exponential averages with factor alpha:
  //   avg = (100 - alpha) * avg + alpha * cur_sample
  //
  //   The durations measure:  end_time[n] - start_time[n]
  //   The periods measure:    start_time[n] - start_time[n-1]
  //
  // The cms period and duration include only concurrent collections; time spent
  // in foreground cms collections due to System.gc() or because of a failure to
  // keep up are not included.
  //
  // There are 3 alphas to "bootstrap" the statistics.  The _saved_alpha is the
  // real value, but is used only after the first period.  A value of 100 is
  // used for the first sample so it gets the entire weight.
  unsigned int _saved_alpha; // 0-100
  unsigned int _gc0_alpha;
  unsigned int _cms_alpha;

  double _gc0_duration;
  double _gc0_period;
  size_t _gc0_promoted;           // bytes promoted per gc0
  double _cms_duration;
  double _cms_duration_pre_sweep; // time from initiation to start of sweep
  double _cms_duration_per_mb;
  double _cms_period;
  size_t _cms_allocated;          // bytes of direct allocation per gc0 period

  // Timers.
  elapsedTimer _cms_timer;
  TimeStamp    _gc0_begin_time;
  TimeStamp    _cms_begin_time;
  TimeStamp    _cms_end_time;

  // Snapshots of the amount used in the CMS generation.
  size_t _cms_used_at_gc0_begin;
  size_t _cms_used_at_gc0_end;
  size_t _cms_used_at_cms_begin;

  // Used to prevent the duty cycle from being reduced in the middle of a cms
  // cycle.
  bool _allow_duty_cycle_reduction;

  enum {
    _GC0_VALID = 0x1,
    _CMS_VALID = 0x2,
    _ALL_VALID = _GC0_VALID | _CMS_VALID
  };

  unsigned int _valid_bits;

  unsigned int _icms_duty_cycle;  // icms duty cycle (0-100).
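  // Worked example (illustrative only) of the exponential-average update
  // documented above, with the percentage weights made explicit:
  //
  //   // with alpha == 25, avg == 4.0s, cur_sample == 6.0s:
  //   // avg' = ((100 - 25) * 4.0 + 25 * 6.0) / 100 = 4.5s
  //
  // For the very first sample an alpha of 100 is used, so the sample
  // replaces the (empty) average outright; _saved_alpha takes over from
  // the second period onwards.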
 protected:

  // Return a duty cycle that avoids wild oscillations, by limiting the amount
  // of change between old_duty_cycle and new_duty_cycle (the latter is treated
  // as a recommended value).
  static unsigned int icms_damped_duty_cycle(unsigned int old_duty_cycle,
                                             unsigned int new_duty_cycle);
  unsigned int icms_update_duty_cycle_impl();

  // In support of adjusting cms trigger ratios based on history
  // of concurrent mode failure.
  double cms_free_adjustment_factor(size_t free) const;
  void   adjust_cms_free_adjustment_factor(bool fail, size_t free);

 public:
  CMSStats(ConcurrentMarkSweepGeneration* cms_gen,
           unsigned int alpha = CMSExpAvgFactor);

  // Whether or not the statistics contain valid data; higher level statistics
  // cannot be called until this returns true (they require at least one young
  // gen and one cms cycle to have completed).
  bool valid() const;

  // Record statistics.
  void record_gc0_begin();
  void record_gc0_end(size_t cms_gen_bytes_used);
  void record_cms_begin();
  void record_cms_end();

  // Allow management of the cms timer, which must be stopped/started around
  // yield points.
  elapsedTimer& cms_timer()     { return _cms_timer; }
  void start_cms_timer()        { _cms_timer.start(); }
  void stop_cms_timer()         { _cms_timer.stop(); }

  // Basic statistics; units are seconds or bytes.
  double gc0_period() const     { return _gc0_period; }
  double gc0_duration() const   { return _gc0_duration; }
  size_t gc0_promoted() const   { return _gc0_promoted; }
  double cms_period() const     { return _cms_period; }
  double cms_duration() const   { return _cms_duration; }
  double cms_duration_per_mb() const { return _cms_duration_per_mb; }
  size_t cms_allocated() const  { return _cms_allocated; }

  size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end; }

  // Seconds since the last background cms cycle began or ended.
  double cms_time_since_begin() const;
  double cms_time_since_end() const;

  // Higher level statistics--caller must check that valid() returns true before
  // calling.

  // Returns bytes promoted per second of wall clock time.
  double promotion_rate() const;

  // Returns bytes directly allocated per second of wall clock time.
  double cms_allocation_rate() const;

  // Rate at which space in the cms generation is being consumed (sum of the
  // above two).
  double cms_consumption_rate() const;

  // Returns an estimate of the number of seconds until the cms generation will
  // fill up, assuming no collection work is done.
  double time_until_cms_gen_full() const;

  // Returns an estimate of the number of seconds remaining until
  // the cms generation collection should start.
  double time_until_cms_start() const;

  // End of higher level statistics.

  // Returns the cms incremental mode duty cycle, as a percentage (0-100).
  unsigned int icms_duty_cycle() const { return _icms_duty_cycle; }

  // Update the duty cycle and return the new value.
  unsigned int icms_update_duty_cycle();
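  // Illustrative sketch (an assumption about the shape of the damping, not
  // the actual implementation in the .cpp file) of what
  // icms_damped_duty_cycle() above does: the recommended value is pulled
  // only part of the way from the old one, so a noisy recommendation
  // cannot swing the duty cycle wildly from one update to the next:
  //
  //   // e.g. old_duty_cycle == 20, new_duty_cycle == 80, max step == 30:
  //   // damped = MIN2(old_duty_cycle + 30u, new_duty_cycle) == 50, not 80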
  // Debugging.
  void print_on(outputStream* st) const PRODUCT_RETURN;
  void print() const { print_on(gclog_or_tty); }
};

// A closure related to weak reference processing which
// we embed in the CMSCollector, since we need to pass
// it to the reference processor for secondary filtering
// of references based on reachability of referent;
// see role of _is_alive_non_header closure in the
// ReferenceProcessor class.
// For objects in the CMS generation, this closure checks
// if the object is "live" (reachable). Used in weak
// reference processing.
class CMSIsAliveClosure: public BoolObjectClosure {
  const MemRegion  _span;
  const CMSBitMap* _bit_map;

  friend class CMSCollector;
 public:
  CMSIsAliveClosure(MemRegion span,
                    CMSBitMap* bit_map):
    _span(span),
    _bit_map(bit_map) {
    assert(!span.is_empty(), "Empty span could spell trouble");
  }

  void do_object(oop obj) {
    assert(false, "not to be invoked");
  }

  bool do_object_b(oop obj);
};


// Implements AbstractRefProcTaskExecutor for CMS.
class CMSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
 public:

  CMSRefProcTaskExecutor(CMSCollector& collector)
    : _collector(collector)
  { }

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
 private:
  CMSCollector& _collector;
};
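// Hedged sketch (hypothetical caller, not code from this file) of how the
// executor above is typically driven: the reference processor is handed
// the executor along with the is-alive/keep-alive closures, and invokes
// execute() on the tasks it builds internally:
//
//   // CMSRefProcTaskExecutor task_executor(*collector);
//   // rp->process_discovered_references(&is_alive, &keep_alive,
//   //                                   &complete_gc, &task_executor, ...);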
class CMSCollector: public CHeapObj<mtGC> {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweepGeneration;
  friend class CompactibleFreeListSpace;
  friend class CMSParRemarkTask;
  friend class CMSConcMarkingTask;
  friend class CMSRefProcTaskProxy;
  friend class CMSRefProcTaskExecutor;
  friend class ScanMarkedObjectsAgainCarefullyClosure;  // for sampling eden
  friend class SurvivorSpacePrecleanClosure;            // --- ditto -------
  friend class PushOrMarkClosure;             // to access _restart_addr
  friend class Par_PushOrMarkClosure;         // to access _restart_addr
  friend class MarkFromRootsClosure;          //  -- ditto --
                                              // ... and for clearing cards
  friend class Par_MarkFromRootsClosure;      // to access _restart_addr
                                              // ... and for clearing cards
  friend class Par_ConcMarkingClosure;        // to access _restart_addr etc.
  friend class MarkFromRootsVerifyClosure;    // to access _restart_addr
  friend class PushAndMarkVerifyClosure;      //  -- ditto --
  friend class MarkRefsIntoAndScanClosure;    // to access _overflow_list
  friend class PushAndMarkClosure;            //  -- ditto --
  friend class Par_PushAndMarkClosure;        //  -- ditto --
  friend class CMSKeepAliveClosure;           //  -- ditto --
  friend class CMSDrainMarkingStackClosure;   //  -- ditto --
  friend class CMSInnerParMarkAndPushClosure; //  -- ditto --
  NOT_PRODUCT(friend class ScanMarkedObjectsAgainClosure;) // assertion on _overflow_list
  friend class ReleaseForegroundGC;  // to access _foregroundGCShouldWait
  friend class VM_CMS_Operation;
  friend class VM_CMS_Initial_Mark;
  friend class VM_CMS_Final_Remark;
  friend class TraceCMSMemoryManagerStats;

 private:
  jlong _time_of_last_gc;
  void update_time_of_last_gc(jlong now) {
    _time_of_last_gc = now;
  }

  OopTaskQueueSet* _task_queues;

  // Overflow list of grey objects, threaded through mark-word
  // Manipulated with CAS in the parallel/multi-threaded case.
  oop _overflow_list;
  // The following array-pair keeps track of mark words
  // displaced for accommodating overflow list above.
  // This code will likely be revisited under RFE#4922830.
  Stack<oop, mtGC>     _preserved_oop_stack;
  Stack<markOop, mtGC> _preserved_mark_stack;

  int* _hash_seed;

  // In support of multi-threaded concurrent phases
  YieldingFlexibleWorkGang* _conc_workers;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Initialization Errors
  bool _completed_initialization;

  // In support of ExplicitGCInvokesConcurrent
  static bool _full_gc_requested;
  unsigned int _collection_count_start;

  // Should we unload classes this concurrent cycle?
  bool _should_unload_classes;
  unsigned int _concurrent_cycles_since_last_unload;
  unsigned int concurrent_cycles_since_last_unload() const {
    return _concurrent_cycles_since_last_unload;
  }
  // Did we (allow) unload classes in the previous concurrent cycle?
  bool unloaded_classes_last_cycle() const {
    return concurrent_cycles_since_last_unload() == 0;
  }
  // Root scanning options for perm gen
  int _roots_scanning_options;
  int roots_scanning_options() const      { return _roots_scanning_options; }
  void add_root_scanning_option(int o)    { _roots_scanning_options |= o;  }
  void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o; }

  // Verification support
  CMSBitMap _verification_mark_bm;
  void verify_after_remark_work_1();
  void verify_after_remark_work_2();

  // true if any verification flag is on.
  bool _verifying;
  bool verifying() const     { return _verifying; }
  void set_verifying(bool v) { _verifying = v; }

  // Collector policy
  ConcurrentMarkSweepPolicy* _collector_policy;
  ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }

  void set_did_compact(bool v);

  // XXX Move these to CMSStats ??? FIX ME !!!
  elapsedTimer _inter_sweep_timer;   // time between sweeps
  elapsedTimer _intra_sweep_timer;   // time _in_ sweeps
  // padded decaying average estimates of the above
  AdaptivePaddedAverage _inter_sweep_estimate;
  AdaptivePaddedAverage _intra_sweep_estimate;

 protected:
  ConcurrentMarkSweepGeneration* _cmsGen;  // old gen (CMS)
  MemRegion                      _span;    // span covering above two
  CardTableRS*                   _ct;      // card table

  // CMS marking support structures
  CMSBitMap    _markBitMap;
  CMSBitMap    _modUnionTable;
  CMSMarkStack _markStack;

  HeapWord* _restart_addr; // in support of marking stack overflow
  void lower_restart_addr(HeapWord* low);
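  // Illustrative note (a sketch of the protocol, inferred from the
  // declarations around it): when a marking stack or work queue overflows,
  // the object that could not be pushed stays marked in the bit map, and
  // the collector remembers the lowest such address so a later pass can
  // rescan from there and recover the lost work:
  //
  //   // if (!mark_stack->push(obj)) {
  //   //   collector->lower_restart_addr((HeapWord*)obj); // remember low point
  //   // }
  //   // ... later: resume marking from _restart_addr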
  // Counters in support of marking stack / work queue overflow handling:
  // a non-zero value indicates certain types of overflow events during
  // the current CMS cycle and could lead to stack resizing efforts at
  // an opportune future time.
  size_t _ser_pmc_preclean_ovflw;
  size_t _ser_pmc_remark_ovflw;
  size_t _par_pmc_remark_ovflw;
  size_t _ser_kac_preclean_ovflw;
  size_t _ser_kac_ovflw;
  size_t _par_kac_ovflw;
  NOT_PRODUCT(ssize_t _num_par_pushes;)

  // ("Weak") Reference processing support
  ReferenceProcessor* _ref_processor;
  CMSIsAliveClosure   _is_alive_closure;
  // keep this textually after _markBitMap and _span; c'tor dependency

  ConcurrentMarkSweepThread* _cmsThread;  // the thread doing the work
  ModUnionClosure    _modUnionClosure;
  ModUnionClosurePar _modUnionClosurePar;

  // CMS abstract state machine
  // initial_state: Idling
  // next_state(Idling) = {Marking}
  // next_state(Marking) = {Precleaning, Sweeping}
  // next_state(Precleaning) = {AbortablePreclean, FinalMarking}
  // next_state(AbortablePreclean) = {FinalMarking}
  // next_state(FinalMarking) = {Sweeping}
  // next_state(Sweeping) = {Resizing}
  // next_state(Resizing) = {Resetting}
  // next_state(Resetting) = {Idling}
  // The numeric values below are chosen so that:
  // . _collectorState <= Idling == post-sweep && pre-mark
  // . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
  //                                            precleaning || abortablePreclean
 public:
  enum CollectorState {
    Resizing          = 0,
    Resetting         = 1,
    Idling            = 2,
    InitialMarking    = 3,
    Marking           = 4,
    Precleaning       = 5,
    AbortablePreclean = 6,
    FinalMarking      = 7,
    Sweeping          = 8
  };
 protected:
  static CollectorState _collectorState;

  // State related to prologue/epilogue invocation for my generations
  bool _between_prologue_and_epilogue;

  // Signalling/State related to coordination between fore- and background GC
  // Note: When the baton has been passed from background GC to foreground GC,
  // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
  static bool _foregroundGCIsActive;   // true iff foreground collector is active or
                                       // wants to go active
  static bool _foregroundGCShouldWait; // true iff background GC is active and has not
                                       // yet passed the baton to the foreground GC

  // Support for CMSScheduleRemark (abortable preclean)
  bool _abort_preclean;
  bool _start_sampling;

  int    _numYields;
  size_t _numDirtyCards;
  size_t _sweep_count;
  // number of full gc's since the last concurrent gc.
  uint _full_gcs_since_conc_gc;

  // occupancy used for bootstrapping stats
  double _bootstrap_occupancy;

  // timer
  elapsedTimer _timer;

  // Timing, allocation and promotion statistics, used for scheduling.
  CMSStats _stats;

  // Allocation limits installed in the young gen, used only in
  // CMSIncrementalMode.  When an allocation in the young gen would cross one of
  // these limits, the cms generation is notified and the cms thread is started
  // or stopped, respectively.
  HeapWord* _icms_start_limit;
  HeapWord* _icms_stop_limit;

  enum CMS_op_type {
    CMS_op_checkpointRootsInitial,
    CMS_op_checkpointRootsFinal
  };

  void do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause);
  bool stop_world_and_do(CMS_op_type op);

  OopTaskQueueSet* task_queues()  { return _task_queues; }
  int*             hash_seed(int i) { return &_hash_seed[i]; }
  YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; }
  // Support for parallelizing Eden rescan in CMS remark phase
  void sample_eden(); // ... sample Eden space top

 private:
  // Support for parallelizing young gen rescan in CMS remark phase
  Generation* _young_gen;       // the younger gen
  HeapWord**  _top_addr;        // ... Top of Eden
  HeapWord**  _end_addr;        // ... End of Eden
  HeapWord**  _eden_chunk_array;    // ... Eden partitioning array
  size_t      _eden_chunk_index;    // ... top (exclusive) of array
  size_t      _eden_chunk_capacity; // ... max entries in array
  // This is meant to be a boolean flag, but jbyte for CAS.
  jbyte       _eden_chunk_sampling_active;

  // Support for parallelizing survivor space rescan
  HeapWord** _survivor_chunk_array;
  size_t     _survivor_chunk_index;
  size_t     _survivor_chunk_capacity;
  size_t*    _cursor;
  ChunkArray* _survivor_plab_array;

  // Support for marking stack overflow handling
  bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
  bool par_take_from_overflow_list(size_t num,
                                   OopTaskQueue* to_work_q,
                                   int no_of_gc_threads);
  void push_on_overflow_list(oop p);
  void par_push_on_overflow_list(oop p);
  // the following is, obviously, not, in general, "MT-stable"
  bool overflow_list_is_empty() const;

  void preserve_mark_if_necessary(oop p);
  void par_preserve_mark_if_necessary(oop p);
  void preserve_mark_work(oop p, markOop m);
  void restore_preserved_marks_if_any();
  NOT_PRODUCT(bool no_preserved_marks() const;)
  // in support of testing overflow code
  NOT_PRODUCT(int _overflow_counter;)
  NOT_PRODUCT(bool simulate_overflow();)     // sequential
  NOT_PRODUCT(bool par_simulate_overflow();) // MT version

  // CMS work methods
  void checkpointRootsInitialWork(bool asynch); // initial checkpoint work

  // a return value of false indicates failure due to stack overflow
  bool markFromRootsWork(bool asynch);  // concurrent marking work

 public:   // FIX ME!!! only for testing
  bool do_marking_st(bool asynch);      // single-threaded marking
  bool do_marking_mt(bool asynch);      // multi-threaded marking

 private:

  // concurrent precleaning work
  size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
                                  ScanMarkedObjectsAgainCarefullyClosure* cl);
  size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
                             ScanMarkedObjectsAgainCarefullyClosure* cl);
  // Does precleaning work, returning a quantity indicative of
  // the amount of "useful work" done.
  size_t preclean_work(bool clean_refs, bool clean_survivors);
  void preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock);
  void abortable_preclean(); // Preclean while looking for possible abort
  void initialize_sequential_subtasks_for_young_gen_rescan(int i);
  // Helper function for above; merge-sorts the per-thread plab samples
  void merge_survivor_plab_arrays(ContiguousSpace* surv, int no_of_gc_threads);
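  // Illustrative sketch (assumed shape, not the .cpp implementation) of the
  // merge step above: each GC thread contributes a sorted ChunkArray of
  // survivor-space sample addresses, and the merge produces a single sorted
  // _survivor_chunk_array that the parallel rescan can partition evenly:
  //
  //   // while (some per-thread array still has unconsumed samples) {
  //   //   pick thread j whose next sample _survivor_plab_array[j].nth(_cursor[j])
  //   //   is smallest;
  //   //   _survivor_chunk_array[_survivor_chunk_index++] =
  //   //       _survivor_plab_array[j].nth(_cursor[j]++);
  //   // }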
  // Resets (i.e. clears) the per-thread plab sample vectors
  void reset_survivor_plab_arrays();

  // final (second) checkpoint work
  void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
                                bool init_mark_was_synchronous);
  // work routine for parallel version of remark
  void do_remark_parallel();
  // work routine for non-parallel version of remark
  void do_remark_non_parallel();
  // reference processing work routine (during second checkpoint)
  void refProcessingWork(bool asynch, bool clear_all_soft_refs);

  // concurrent sweeping work
  void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);

  // (concurrent) resetting of support data structures
  void reset(bool asynch);

  // Clear _expansion_cause fields of constituent generations
  void clear_expansion_cause();

  // An auxiliary method used to record the ends of
  // used regions of each generation to limit the extent of sweep
  void save_sweep_limits();

  // A work method used by foreground collection to determine
  // what type of collection (compacting or not, continuing or fresh)
  // it should do.
  void decide_foreground_collection_type(bool clear_all_soft_refs,
                                         bool* should_compact, bool* should_start_over);

  // A work method used by the foreground collector to do
  // a mark-sweep-compact.
  void do_compaction_work(bool clear_all_soft_refs);

  // A work method used by the foreground collector to do
  // a mark-sweep, after taking over from a possibly on-going
  // concurrent mark-sweep collection.
  void do_mark_sweep_work(bool clear_all_soft_refs,
                          CollectorState first_state, bool should_start_over);

  // If the background GC is active, acquire control from the background
  // GC and do the collection.
  void acquire_control_and_collect(bool full, bool clear_all_soft_refs);

  // For synchronizing passing of control from background to foreground
  // GC.  waitForForegroundGC() is called by the background
  // collector.  If it had to wait for a foreground collection,
  // it returns true and the background collection should assume
  // that the collection was finished by the foreground
  // collector.
  bool waitForForegroundGC();

  // Incremental mode triggering:  recompute the icms duty cycle and set the
  // allocation limits in the young gen.
  void icms_update_allocation_limits();

  size_t block_size_using_printezis_bits(HeapWord* addr) const;
  size_t block_size_if_printezis_bits(HeapWord* addr) const;
  HeapWord* next_card_start_after_block(HeapWord* addr) const;

  void setup_cms_unloading_and_verification_state();
 public:
  CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
               CardTableRS*                   ct,
               ConcurrentMarkSweepPolicy*     cp);
  ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }

  ReferenceProcessor* ref_processor() { return _ref_processor; }
  void ref_processor_init();

  Mutex* bitMapLock() const { return _markBitMap.lock(); }
  static CollectorState abstract_state() { return _collectorState; }

  bool should_abort_preclean() const; // Whether preclean should be aborted.
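  // Illustrative note (an assumed sketch of abortable_preclean() in the
  // .cpp; the real loop and its thresholds live there): during
  // AbortablePreclean the collector repeatedly precleans dirty cards, and
  // should_abort_preclean() lets it cut the phase short, e.g. when
  // CMSScheduleRemark sampling of Eden occupancy (see _abort_preclean and
  // _start_sampling above) indicates the final checkpoint should run now:
  //
  //   // while (!should_abort_preclean()) {
  //   //   size_t workdone = preclean_work(...);
  //   //   if (workdone < some_minimum) break;  // placeholder threshold
  //   // }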
  size_t get_eden_used() const;
  size_t get_eden_capacity() const;

  ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }

  // locking checks
  NOT_PRODUCT(static bool have_cms_token();)

  // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
  bool shouldConcurrentCollect();

  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);
  void collect_in_background(bool clear_all_soft_refs);
  void collect_in_foreground(bool clear_all_soft_refs);

  // In support of ExplicitGCInvokesConcurrent
  static void request_full_gc(unsigned int full_gc_count);
  // Should we unload classes in a particular concurrent cycle?
  bool should_unload_classes() const {
    return _should_unload_classes;
  }
  void update_should_unload_classes();

  void direct_allocated(HeapWord* start, size_t size);

  // Object is dead if not marked and current phase is sweeping.
  bool is_dead_obj(oop obj) const;

  // After a promotion (of "start"), do any necessary marking.
  // If "par", then it's being done by a parallel GC thread.
  // The last two args indicate if we need precise marking
  // and if so the size of the object so it can be dirtied
  // in its entirety.
  void promoted(bool par, HeapWord* start,
                bool is_obj_array, size_t obj_size);

  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                     size_t word_size);

  void getFreelistLocks() const;
  void releaseFreelistLocks() const;
  bool haveFreelistLocks() const;

  // Adjust size of underlying generation
  void compute_new_size();

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_epilogue(bool full);

  jlong time_of_last_gc(jlong now) {
    if (_collectorState <= Idling) {
      // gc not in progress
      return _time_of_last_gc;
    } else {
      // collection in progress
      return now;
    }
  }

  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num);
  void sample_eden_chunk();

  CMSBitMap* markBitMap() { return &_markBitMap; }
  void directAllocated(HeapWord* start, size_t size);

  // main CMS steps and related support
  void checkpointRootsInitial(bool asynch);
  bool markFromRoots(bool asynch);  // a return value of false indicates failure
                                    // due to stack overflow
  void preclean();
  void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs,
                            bool init_mark_was_synchronous);
  void sweep(bool asynch);
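  // Illustrative ordering of the main steps above (a sketch matching the
  // state machine documented earlier; the actual driver lives in the .cpp):
  //
  //   // checkpointRootsInitial(true);    // InitialMarking (stop-the-world)
  //   // markFromRoots(true);             // Marking (concurrent)
  //   // preclean();                      // Precleaning / AbortablePreclean
  //   // checkpointRootsFinal(true, ...); // FinalMarking (stop-the-world)
  //   // sweep(true);                     // Sweeping (concurrent)
  //   // reset(true);                     // Resetting, then back to Idling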
  // Check that the currently executing thread is the expected
  // one (foreground collector or background collector).
  static void check_correct_thread_executing() PRODUCT_RETURN;
  // XXXPERM void print_statistics()           PRODUCT_RETURN;

  bool is_cms_reachable(HeapWord* addr);

  // Performance Counter Support
  CollectorCounters* counters() { return _gc_counters; }

  // timer stuff
  void   startTimer() { assert(!_timer.is_active(), "Error"); _timer.start(); }
  void   stopTimer()  { assert( _timer.is_active(), "Error"); _timer.stop();  }
  void   resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset(); }
  double timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); }

  int  yields()          { return _numYields; }
  void resetYields()     { _numYields = 0;    }
  void incrementYields() { _numYields++;      }
  void resetNumDirtyCards()               { _numDirtyCards = 0;    }
  void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; }
  size_t numDirtyCards()                  { return _numDirtyCards; }

  static bool foregroundGCShouldWait()           { return _foregroundGCShouldWait; }
  static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v;    }
  static bool foregroundGCIsActive()             { return _foregroundGCIsActive;   }
  static void set_foregroundGCIsActive(bool v)   { _foregroundGCIsActive = v;      }
  size_t sweep_count() const     { return _sweep_count; }
  void   increment_sweep_count() { _sweep_count++; }

  // Timers/stats for gc scheduling and incremental mode pacing.
  CMSStats& stats() { return _stats; }

  // Convenience methods that check whether CMSIncrementalMode is enabled and
  // forward to the corresponding methods in ConcurrentMarkSweepThread.
  static void start_icms();
  static void stop_icms();    // Called at the end of the cms cycle.
  static void disable_icms(); // Called before a foreground collection.
  static void enable_icms();  // Called after a foreground collection.
  void icms_wait();           // Called at yield points.

  // Adaptive size policy
  CMSAdaptiveSizePolicy* size_policy();
  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();

  static void print_on_error(outputStream* st);

  // debugging
  void verify();
  bool verify_after_remark(bool silent = VerifySilently);
  void verify_ok_to_terminate() const PRODUCT_RETURN;
  void verify_work_stacks_empty() const PRODUCT_RETURN;
  void verify_overflow_empty() const PRODUCT_RETURN;

  // convenience methods in support of debugging
  static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
  HeapWord* block_start(const void* p) const PRODUCT_RETURN0;

  // accessors
  CMSMarkStack* verification_mark_stack() { return &_markStack; }
  CMSBitMap* verification_mark_bm() { return &_verification_mark_bm; }

  // Initialization errors
  bool completed_initialization() { return _completed_initialization; }

  void print_eden_and_survivor_chunk_arrays();
};

class CMSExpansionCause : public AllStatic {
 public:
  enum Cause {
    _no_expansion,
    _satisfy_free_ratio,
    _satisfy_promotion,
    _satisfy_allocation,
    _allocate_par_lab,
    _allocate_par_spooling_space,
    _adaptive_size_policy
  };
  // Return a string describing the cause of the expansion.
  static const char* to_string(CMSExpansionCause::Cause cause);
};

class ConcurrentMarkSweepGeneration: public CardGeneration {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweep;
  friend class CMSCollector;
 protected:
  static CMSCollector*      _collector; // the collector that collects us
  CompactibleFreeListSpace* _cmsSpace;  // underlying space (only one for now)

  // Performance Counters
  GenerationCounters* _gen_counters;
  GSpaceCounters*     _space_counters;

  // Words directly allocated, used by CMSStats.
  size_t _direct_allocated_words;

  // Non-product stat counters
  NOT_PRODUCT(
    size_t _numObjectsPromoted;
    size_t _numWordsPromoted;
    size_t _numObjectsAllocated;
    size_t _numWordsAllocated;
  )

  // Used for sizing decisions
  bool _incremental_collection_failed;
  bool incremental_collection_failed() {
    return _incremental_collection_failed;
  }
  void set_incremental_collection_failed() {
    _incremental_collection_failed = true;
  }
  void clear_incremental_collection_failed() {
    _incremental_collection_failed = false;
  }

  // accessors
  void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v; }
  CMSExpansionCause::Cause expansion_cause() const     { return _expansion_cause; }

 private:
  // For parallel young-gen GC support.
  CMSParGCThreadState** _par_gc_thread_states;

  // Reason generation was expanded
  CMSExpansionCause::Cause _expansion_cause;

  // In support of MinChunkSize being larger than min object size
  const double _dilatation_factor;

  enum CollectionTypes {
    Concurrent_collection_type     = 0,
    MS_foreground_collection_type  = 1,
    MSC_foreground_collection_type = 2,
    Unknown_collection_type        = 3
  };

  CollectionTypes _debug_collection_type;

  // True if a compacting collection was done.
  bool _did_compact;
  bool did_compact() { return _did_compact; }

  // Fraction of current occupancy at which to start a CMS collection which
  // will collect this generation (at least).
  double _initiating_occupancy;

 protected:
  // Shrink generation by specified size (returns false if unable to shrink)
  void shrink_free_list_by(size_t bytes);

  // Update statistics for GC
  virtual void update_gc_stats(int level, bool full);

  // Maximum available space in the generation (including uncommitted)
  // space.
  size_t max_available() const;
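  // Hedged sketch of how _initiating_occupancy is derived (assumed to
  // mirror init_initiating_occupancy() below, whose definition in the .cpp
  // is authoritative): an explicit occupancy fraction argument wins,
  // otherwise a value is synthesized from MinHeapFreeRatio and the trigger
  // ratio argument:
  //
  //   // if (io >= 0) {
  //   //   _initiating_occupancy = (double)io / 100.0;
  //   // } else {
  //   //   _initiating_occupancy = ((100 - MinHeapFreeRatio) +
  //   //       (double)(tr * MinHeapFreeRatio) / 100.0) / 100.0;
  //   // }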
  // getter and initializer for _initiating_occupancy field.
  double initiating_occupancy() const { return _initiating_occupancy; }
  void   init_initiating_occupancy(intx io, uintx tr);

 public:
  ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                int level, CardTableRS* ct,
                                bool use_adaptive_freelists,
                                FreeBlockDictionary<FreeChunk>::DictionaryChoice);

  // Accessors
  CMSCollector* collector() const { return _collector; }
  static void set_collector(CMSCollector* collector) {
    assert(_collector == NULL, "already set");
    _collector = collector;
  }
  CompactibleFreeListSpace* cmsSpace() const { return _cmsSpace; }

  Mutex* freelistLock() const;

  virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }

  // Adaptive size policy
  CMSAdaptiveSizePolicy* size_policy();

  void set_did_compact(bool v) { _did_compact = v; }

  bool refs_discovery_is_atomic() const { return false; }
  bool refs_discovery_is_mt()     const {
    // Note: CMS does MT-discovery during the parallel-remark
    // phases. Use ReferenceProcessorMTMutator to make refs
    // discovery MT-safe during such phases or other parallel
    // discovery phases in the future. This may all go away
    // if/when we decide that refs discovery is sufficiently
    // rare that the cost of the CAS's involved is in the
    // noise. That's a measurement that should be done, and
    // the code simplified if that turns out to be the case.
    return ConcGCThreads > 1;
  }

  // Override
  virtual void ref_processor_init();

  // Grow generation by specified size (returns false if unable to grow)
  bool grow_by(size_t bytes);
  // Grow generation to reserved size.
  bool grow_to_reserved();

  void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;
  double occupancy() const { return ((double)used())/((double)capacity()); }
  size_t contiguous_available() const;
  size_t unsafe_max_alloc_nogc() const;

  // over-rides
  MemRegion used_region() const;
  MemRegion used_region_at_save_marks() const;

  // Does a "full" (forced) collection invoked on this generation collect
  // all younger generations as well? Note that the second conjunct is a
  // hack to allow the collection of the younger gen first if the flag is
  // set. This is better than using the policy's should_collect_gen0_first(),
  // since that causes us to do an extra unnecessary pair of restart-&-stop-world.
  virtual bool full_collects_younger_generations() const {
    return UseCMSCompactAtFullCollection && !CollectGen0First;
  }

  void space_iterate(SpaceClosure* blk, bool usedOnly = false);

  // Support for compaction
  CompactibleSpace* first_compaction_space() const;
  // Adjust quantities in the generation affected by
  // the compaction.
  void reset_after_compaction();

  // Allocation support
  HeapWord* allocate(size_t size, bool tlab);
  HeapWord* have_lock_and_allocate(size_t size, bool tlab);
  oop       promote(oop obj, size_t obj_size);
  HeapWord* par_allocate(size_t size, bool tlab) {
    return allocate(size, tlab);
  }
  // Incremental mode triggering.
  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                     size_t word_size);

  // Used by CMSStats to track direct allocation.  The value is sampled and
  // reset after each young gen collection.
  size_t direct_allocated_words() const { return _direct_allocated_words; }
  void reset_direct_allocated_words()   { _direct_allocated_words = 0; }

  // Overrides for parallel promotion.
  virtual oop par_promote(int thread_num,
                          oop obj, markOop m, size_t word_sz);
  // This one should not be called for CMS.
  virtual void par_promote_alloc_undo(int thread_num,
                                      HeapWord* obj, size_t word_sz);
  virtual void par_promote_alloc_done(int thread_num);
  virtual void par_oop_since_save_marks_iterate_done(int thread_num);

  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const;

  // Inform this (non-young) generation that a promotion failure was
  // encountered during a collection of a younger generation that
  // promotes into this generation.
  virtual void promotion_failure_occurred();

  bool should_collect(bool full, size_t size, bool tlab);
  virtual bool should_concurrent_collect() const;
  virtual bool is_too_full() const;
  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);

  HeapWord* expand_and_allocate(size_t word_size,
                                bool tlab,
                                bool parallel = false);

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_prologue_work(bool full, bool registerClosure,
                        ModUnionClosure* modUnionClosure);
  void gc_epilogue(bool full);
  void gc_epilogue_work(bool full);

  // Time since last GC of this generation
  jlong time_of_last_gc(jlong now) {
    return collector()->time_of_last_gc(now);
  }
  void update_time_of_last_gc(jlong now) {
    collector()->update_time_of_last_gc(now);
  }

  // Allocation failure
  void expand(size_t bytes, size_t expand_bytes,
              CMSExpansionCause::Cause cause);
  virtual bool expand(size_t bytes, size_t expand_bytes);
  void shrink(size_t bytes);
  void shrink_by(size_t bytes);
  HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
  bool expand_and_ensure_spooling_space(PromotionInfo* promo);

  // Iteration support and related enquiries
  void save_marks();
  bool no_allocs_since_save_marks();
  void object_iterate_since_last_GC(ObjectClosure* cl);
  void younger_refs_iterate(OopsInGenClosure* cl);

  // Iteration support specific to CMS generations
  void save_sweep_limit();

  // More iteration support
  virtual void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);
  virtual void oop_iterate(ExtendedOopClosure* cl);
  virtual void safe_object_iterate(ObjectClosure* cl);
  virtual void object_iterate(ObjectClosure* cl);

  // Need to declare the full complement of closures, whether we'll
  // override them or not, or get a message from the compiler:
  //   oop_since_save_marks_iterate_nv hides virtual function...
  #define CMS_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
    void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DECL)

  // Smart allocation  XXX -- move to CFLSpace?
  void setNearLargestChunk();
  bool isNearLargestChunk(HeapWord* addr);
  // Get the chunk at the end of the space.  Delegates to
  // the space.
  FreeChunk* find_chunk_at_end();

  void post_compact();

  // Debugging
  void prepare_for_verify();
  void verify();
  void print_statistics() PRODUCT_RETURN;

  // Performance Counters support
  virtual void update_counters();
  virtual void update_counters(size_t used);
  void initialize_performance_counters();
  CollectorCounters* counters() { return collector()->counters(); }

  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num) {
    // Delegate to collector
    return collector()->get_data_recorder(thr_num);
  }
  void sample_eden_chunk() {
    // Delegate to collector
    return collector()->sample_eden_chunk();
  }

  // Printing
  const char* name() const;
  virtual const char* short_name() const { return "CMS"; }
  void print() const;
  void printOccupancy(const char* s);
  bool must_be_youngest() const { return false; }
  bool must_be_oldest()   const { return true; }

  // Resize the generation after a compacting GC.  The
  // generation can be treated as a contiguous space
  // after the compaction.
  virtual void compute_new_size();
  // Resize the generation after a non-compacting
  // collection.
  void compute_new_size_free_list();

  CollectionTypes debug_collection_type() { return _debug_collection_type; }
  void rotate_debug_collection_type();
};

class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration {

  // Return the size policy from the heap's collector
  // policy, cast to CMSAdaptiveSizePolicy*.
  CMSAdaptiveSizePolicy* cms_size_policy() const;

  // Resize the generation based on the adaptive size
  // policy.
  void resize(size_t cur_promo, size_t desired_promo);

  // Return the GC counters from the collector policy
  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();

  virtual void shrink_by(size_t bytes);

 public:
  ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                  int level, CardTableRS* ct,
                                  bool use_adaptive_freelists,
                                  FreeBlockDictionary<FreeChunk>::DictionaryChoice
                                    dictionaryChoice) :
    ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct,
                                  use_adaptive_freelists, dictionaryChoice) {}

  virtual const char* short_name() const { return "ASCMS"; }
  virtual Generation::Name kind() { return Generation::ASConcurrentMarkSweep; }

  virtual void update_counters();
  virtual void update_counters(size_t used);
};

//
// Closures of various sorts used by CMS to accomplish its work
//

// This closure is used to check that a certain set of oops is empty.
class FalseClosure: public OopClosure {
 public:
  void do_oop(oop* p)       { guarantee(false, "Should be an empty set"); }
  void do_oop(narrowOop* p) { guarantee(false, "Should be an empty set"); }
};
// This closure is used to do concurrent marking from the roots
// following the first checkpoint.
class MarkFromRootsClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _bitMap;
  CMSBitMap*     _mut;
  CMSMarkStack*  _markStack;
  bool           _yield;
  int            _skipBits;
  HeapWord*      _finger;
  HeapWord*      _threshold;
  DEBUG_ONLY(bool _verifying;)

 public:
  MarkFromRootsClosure(CMSCollector* collector, MemRegion span,
                       CMSBitMap* bitMap,
                       CMSMarkStack* markStack,
                       bool should_yield, bool verifying = false);
  bool do_bit(size_t offset);
  void reset(HeapWord* addr);
  inline void do_yield_check();

 private:
  void scanOopsInOop(HeapWord* ptr);
  void do_yield_work();
};

// This closure is used to do concurrent multi-threaded
// marking from the roots following the first checkpoint.
// XXX This should really be a subclass of the serial version
// above, but I have not had the time to refactor things cleanly.
// That will be done for Dolphin.
class Par_MarkFromRootsClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _whole_span;
  MemRegion      _span;
  CMSBitMap*     _bit_map;
  CMSBitMap*     _mut;
  OopTaskQueue*  _work_queue;
  CMSMarkStack*  _overflow_stack;
  bool           _yield;
  int            _skip_bits;
  HeapWord*      _finger;
  HeapWord*      _threshold;
  CMSConcMarkingTask* _task;
 public:
  Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
                           MemRegion span,
                           CMSBitMap* bit_map,
                           OopTaskQueue* work_queue,
                           CMSMarkStack* overflow_stack,
                           bool should_yield);
  bool do_bit(size_t offset);
  inline void do_yield_check();

 private:
  void scan_oops_in_oop(HeapWord* ptr);
  void do_yield_work();
  bool get_work_from_overflow_stack();
};

// The following closures are used to do certain kinds of verification of
// CMS marking.
class PushAndMarkVerifyClosure: public CMSOopClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _verification_bm;
  CMSBitMap*     _cms_bm;
  CMSMarkStack*  _mark_stack;
 protected:
  void do_oop(oop p);
  template <class T> inline void do_oop_work(T *p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    do_oop(obj);
  }
 public:
  PushAndMarkVerifyClosure(CMSCollector* cms_collector,
                           MemRegion span,
                           CMSBitMap* verification_bm,
                           CMSBitMap* cms_bm,
                           CMSMarkStack* mark_stack);
  void do_oop(oop* p);
  void do_oop(narrowOop* p);

  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
};

class MarkFromRootsVerifyClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _verification_bm;
  CMSBitMap*     _cms_bm;
  CMSMarkStack*  _mark_stack;
  HeapWord*      _finger;
  PushAndMarkVerifyClosure _pam_verify_closure;
 public:
  MarkFromRootsVerifyClosure(CMSCollector* collector, MemRegion span,
                             CMSBitMap* verification_bm,
                             CMSBitMap* cms_bm,
                             CMSMarkStack* mark_stack);
  bool do_bit(size_t offset);
  void reset(HeapWord* addr);
};
// This closure is used to check that a certain set of bits is
// "empty" (i.e. the bit vector doesn't have any 1-bits).
class FalseBitMapClosure: public BitMapClosure {
 public:
  bool do_bit(size_t offset) {
    guarantee(false, "Should not have a 1 bit");
    return true;
  }
};

// This closure is used during the second checkpointing phase
// to rescan the marked objects on the dirty cards in the mod
// union table and the card table proper. It's invoked via
// MarkFromDirtyCardsClosure below. It uses either
// [Par_]MarkRefsIntoAndScanClosure (Par_ in the parallel case)
// declared in genOopClosures.hpp to accomplish some of its work.
// In the parallel case the bitMap is shared, so access to
// it needs to be suitably synchronized for updates by embedded
// closures that update it; however, this closure itself only
// reads the bit_map and because it is idempotent, is immune to
// reading stale values.
class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure {
  #ifdef ASSERT
    CMSCollector*   _collector;
    MemRegion       _span;
    union {
      CMSMarkStack* _mark_stack;
      OopTaskQueue* _work_queue;
    };
  #endif // ASSERT
  bool       _parallel;
  CMSBitMap* _bit_map;
  union {
    MarkRefsIntoAndScanClosure*     _scan_closure;
    Par_MarkRefsIntoAndScanClosure* _par_scan_closure;
  };

 public:
  ScanMarkedObjectsAgainClosure(CMSCollector* collector,
                                MemRegion span,
                                ReferenceProcessor* rp,
                                CMSBitMap* bit_map,
                                CMSMarkStack* mark_stack,
                                MarkRefsIntoAndScanClosure* cl):
    #ifdef ASSERT
      _collector(collector),
      _span(span),
      _mark_stack(mark_stack),
    #endif // ASSERT
    _parallel(false),
    _bit_map(bit_map),
    _scan_closure(cl) { }

  ScanMarkedObjectsAgainClosure(CMSCollector* collector,
                                MemRegion span,
                                ReferenceProcessor* rp,
                                CMSBitMap* bit_map,
                                OopTaskQueue* work_queue,
                                Par_MarkRefsIntoAndScanClosure* cl):
    #ifdef ASSERT
      _collector(collector),
      _span(span),
      _work_queue(work_queue),
    #endif // ASSERT
    _parallel(true),
    _bit_map(bit_map),
    _par_scan_closure(cl) { }

  void do_object(oop obj) {
    guarantee(false, "Call do_object_b(oop, MemRegion) instead");
  }
  bool do_object_b(oop obj) {
    guarantee(false, "Call do_object_b(oop, MemRegion) form instead");
    return false;
  }
  bool do_object_bm(oop p, MemRegion mr);
};
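// Illustrative driver sketch (hypothetical, condensed from the comments
// above) of how dirty-card rescanning plugs together during remark: dirty
// ranges come from the mod union table and card table, and each marked
// object on them is rescanned via do_object_bm() through
// MarkFromDirtyCardsClosure (declared below):
//
//   // MarkFromDirtyCardsClosure dirty_cl(collector, span, space,
//   //                                    bit_map, mark_stack, &scan_cl);
//   // mod_union_table->dirty_range_iterate_clear(&dirty_cl);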
// This closure is used during the second checkpointing phase
// to rescan the marked objects on the dirty cards in the mod
// union table and the card table proper. It invokes
// ScanMarkedObjectsAgainClosure above to accomplish much of its work.
// In the parallel case, the bit map is shared and requires
// synchronized access.
class MarkFromDirtyCardsClosure: public MemRegionClosure {
  CompactibleFreeListSpace*      _space;
  ScanMarkedObjectsAgainClosure  _scan_cl;
  size_t                         _num_dirty_cards;

 public:
  MarkFromDirtyCardsClosure(CMSCollector* collector,
                            MemRegion span,
                            CompactibleFreeListSpace* space,
                            CMSBitMap* bit_map,
                            CMSMarkStack* mark_stack,
                            MarkRefsIntoAndScanClosure* cl):
    _space(space),
    _num_dirty_cards(0),
    _scan_cl(collector, span, collector->ref_processor(), bit_map,
             mark_stack, cl) { }

  MarkFromDirtyCardsClosure(CMSCollector* collector,
                            MemRegion span,
                            CompactibleFreeListSpace* space,
                            CMSBitMap* bit_map,
                            OopTaskQueue* work_queue,
                            Par_MarkRefsIntoAndScanClosure* cl):
    _space(space),
    _num_dirty_cards(0),
    _scan_cl(collector, span, collector->ref_processor(), bit_map,
             work_queue, cl) { }

  void do_MemRegion(MemRegion mr);
  void set_space(CompactibleFreeListSpace* space) { _space = space; }
  size_t num_dirty_cards() { return _num_dirty_cards; }
};

// This closure is used in the non-product build to check
// that there are no MemRegions with a certain property.
class FalseMemRegionClosure: public MemRegionClosure {
  void do_MemRegion(MemRegion mr) {
    guarantee(!mr.is_empty(), "Shouldn't be empty");
    guarantee(false, "Should never be here");
  }
};

// This closure is used during the precleaning phase
// to "carefully" rescan marked objects on dirty cards.
// It uses MarkRefsIntoAndScanClosure declared in genOopClosures.hpp
// to accomplish some of its work.
class ScanMarkedObjectsAgainCarefullyClosure: public ObjectClosureCareful {
  CMSCollector*               _collector;
  MemRegion                   _span;
  bool                        _yield;
  Mutex*                      _freelistLock;
  CMSBitMap*                  _bitMap;
  CMSMarkStack*               _markStack;
  MarkRefsIntoAndScanClosure* _scanningClosure;

 public:
  ScanMarkedObjectsAgainCarefullyClosure(CMSCollector* collector,
                                         MemRegion span,
                                         CMSBitMap* bitMap,
                                         CMSMarkStack* markStack,
                                         MarkRefsIntoAndScanClosure* cl,
                                         bool should_yield):
    _collector(collector),
    _span(span),
    _yield(should_yield),
    _bitMap(bitMap),
    _markStack(markStack),
    _scanningClosure(cl) {
  }

  void do_object(oop p) {
    guarantee(false, "call do_object_careful instead");
  }

  size_t do_object_careful(oop p) {
    guarantee(false, "Unexpected caller");
    return 0;
  }

  size_t do_object_careful_m(oop p, MemRegion mr);

  void setFreelistLock(Mutex* m) {
    _freelistLock = m;
    _scanningClosure->set_freelistLock(m);
  }

 private:
  inline bool do_yield_check();

  void do_yield_work();
};
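// [Illustrative sketch, not in the original header] The do_yield_check() /
// do_yield_work() pair used by the "careful" closures follows a common CMS
// pattern: a cheap inline check runs per object, and the out-of-line work
// routine surrenders the locks so mutators and the VM thread can make
// progress. A plausible shape (the exact collector hooks are assumptions):
//
//   inline bool ScanMarkedObjectsAgainCarefullyClosure::do_yield_check() {
//     if (ConcurrentMarkSweepThread::should_yield() &&
//         !_collector->foregroundGCIsActive() &&
//         _yield) {
//       do_yield_work();  // drops _freelistLock and _bitMap's lock, sleeps
//                         // briefly, then reacquires both before returning
//       return true;
//     }
//     return false;
//   }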
class SurvivorSpacePrecleanClosure: public ObjectClosureCareful {
  CMSCollector*       _collector;
  MemRegion           _span;
  bool                _yield;
  CMSBitMap*          _bit_map;
  CMSMarkStack*       _mark_stack;
  PushAndMarkClosure* _scanning_closure;
  unsigned int        _before_count;

 public:
  SurvivorSpacePrecleanClosure(CMSCollector* collector,
                               MemRegion span,
                               CMSBitMap* bit_map,
                               CMSMarkStack* mark_stack,
                               PushAndMarkClosure* cl,
                               unsigned int before_count,
                               bool should_yield):
    _collector(collector),
    _span(span),
    _yield(should_yield),
    _bit_map(bit_map),
    _mark_stack(mark_stack),
    _scanning_closure(cl),
    _before_count(before_count)
  { }

  void do_object(oop p) {
    guarantee(false, "call do_object_careful instead");
  }

  size_t do_object_careful(oop p);

  size_t do_object_careful_m(oop p, MemRegion mr) {
    guarantee(false, "Unexpected caller");
    return 0;
  }

 private:
  inline void do_yield_check();
  void do_yield_work();
};

// This closure is used to accomplish the sweeping work
// after the second checkpoint but before the concurrent reset
// phase.
//
// Terminology
//   left hand chunk (LHC) - block of one or more chunks currently being
//     coalesced. The LHC is available for coalescing with a new chunk.
//   right hand chunk (RHC) - block that is currently being swept that is
//     free or garbage that can be coalesced with the LHC.
// _inFreeRange is true if there is currently a LHC.
// _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk.
// _freeRangeInFreeLists is true if the LHC is in the free lists.
// _freeFinger is the address of the current LHC.
class SweepClosure: public BlkClosureCareful {
  CMSCollector*                  _collector; // collector doing the work
  ConcurrentMarkSweepGeneration* _g;         // Generation being swept
  CompactibleFreeListSpace*      _sp;        // Space being swept
  HeapWord*                      _limit;     // the address at or above which the
                                             // sweep should stop because we do not
                                             // expect newly-garbage blocks eligible
                                             // for sweeping past that address
  Mutex*                         _freelistLock; // Free list lock (in space)
  CMSBitMap*                     _bitMap;       // Marking bit map (in generation)
  bool                           _inFreeRange;  // Indicates if we are in the
                                                // midst of a free run
  bool                           _freeRangeInFreeLists;
                                             // Often, we have just found a free
                                             // chunk and started a new free range;
                                             // we do not eagerly remove this chunk
                                             // from the free lists unless there is
                                             // a possibility of coalescing.
                                             // When true, this flag indicates that
                                             // the _freeFinger below points to a
                                             // potentially free chunk that may
                                             // still be in the free lists
  bool                           _lastFreeRangeCoalesced;
                                             // free range contains chunks coalesced
  bool                           _yield;     // Whether sweeping should be done with
                                             // yields. For instance, when done by
                                             // the foreground collector we shouldn't
                                             // yield
  HeapWord*                      _freeFinger; // When _inFreeRange is set, the
                                              // pointer to the "left hand chunk"
  size_t                         _freeRangeSize;
                                             // When _inFreeRange is set, this
                                             // indicates the accumulated size
                                             // of the "left hand chunk"
  NOT_PRODUCT(
    size_t                       _numObjectsFreed;
    size_t                       _numWordsFreed;
    size_t                       _numObjectsLive;
    size_t                       _numWordsLive;
    size_t                       _numObjectsAlreadyFree;
    size_t                       _numWordsAlreadyFree;
    FreeChunk*                   _last_fc;
  )
 private:
  // Code that is common to a free chunk or garbage when
  // encountered during sweeping.
  void do_post_free_or_garbage_chunk(FreeChunk* fc, size_t chunkSize);
  // Process a free chunk during sweeping.
  void do_already_free_chunk(FreeChunk* fc);
  // Work method called when processing an already free or a
  // freshly garbage chunk to do a lookahead and possibly a
  // preemptive flush if crossing over _limit.
  void lookahead_and_flush(FreeChunk* fc, size_t chunkSize);
  // Process a garbage chunk during sweeping.
  size_t do_garbage_chunk(FreeChunk* fc);
  // Process a live chunk during sweeping.
  size_t do_live_chunk(FreeChunk* fc);

  // Accessors.
  HeapWord* freeFinger() const            { return _freeFinger; }
  void set_freeFinger(HeapWord* v)        { _freeFinger = v; }
  bool inFreeRange() const                { return _inFreeRange; }
  void set_inFreeRange(bool v)            { _inFreeRange = v; }
  bool lastFreeRangeCoalesced() const     { return _lastFreeRangeCoalesced; }
  void set_lastFreeRangeCoalesced(bool v) { _lastFreeRangeCoalesced = v; }
  bool freeRangeInFreeLists() const       { return _freeRangeInFreeLists; }
  void set_freeRangeInFreeLists(bool v)   { _freeRangeInFreeLists = v; }

  // Initialize a free range.
  void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists);
  // Return this chunk to the free lists.
  void flush_cur_free_chunk(HeapWord* chunk, size_t size);

  // Check if we should yield and do so when necessary.
  inline void do_yield_check(HeapWord* addr);

  // Yield.
  void do_yield_work(HeapWord* addr);

  // Debugging/Printing
  void print_free_block_coalesced(FreeChunk* fc) const;

 public:
  SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
               CMSBitMap* bitMap, bool should_yield);
  ~SweepClosure() PRODUCT_RETURN;

  size_t do_blk_careful(HeapWord* addr);
  void print() const { print_on(tty); }
  void print_on(outputStream* st) const;
};
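// [Illustrative sketch, not in the original header] The LHC state machine
// described above, reduced to its core. When the sweep encounters a
// coalescable chunk (already free, or garbage), it either starts a new left
// hand chunk or grows the current one; a live object ends the run, at which
// point the accumulated chunk is returned to the free lists. The helper and
// its 'coalescable' parameter are hypothetical:
//
//   void sweep_step(SweepClosure* s, HeapWord* addr, size_t size,
//                   bool coalescable) {
//     if (coalescable) {
//       if (!s->inFreeRange()) {
//         // Start a new LHC at addr; it is not yet in the free lists.
//         s->initialize_free_range(addr, false);
//       } else {
//         s->set_lastFreeRangeCoalesced(true);  // LHC absorbs this chunk
//       }
//     } else if (s->inFreeRange()) {
//       // A live object terminates the free run: flush the LHC.
//       s->flush_cur_free_chunk(s->freeFinger(),
//                               pointer_delta(addr, s->freeFinger()));
//       s->set_inFreeRange(false);
//     }
//   }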
// Closures related to weak references processing

// During CMS' weak reference processing, this is a
// work-routine/closure used to complete transitive
// marking of objects as live after a certain point
// at which an initial set has been completely accumulated.
// This closure is currently used both during the final
// remark stop-world phase, as well as during the concurrent
// precleaning of the discovered reference lists.
class CMSDrainMarkingStackClosure: public VoidClosure {
  CMSCollector*        _collector;
  MemRegion            _span;
  CMSMarkStack*        _mark_stack;
  CMSBitMap*           _bit_map;
  CMSKeepAliveClosure* _keep_alive;
  bool                 _concurrent_precleaning;
 public:
  CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span,
                              CMSBitMap* bit_map, CMSMarkStack* mark_stack,
                              CMSKeepAliveClosure* keep_alive,
                              bool cpc):
    _collector(collector),
    _span(span),
    _bit_map(bit_map),
    _mark_stack(mark_stack),
    _keep_alive(keep_alive),
    _concurrent_precleaning(cpc) {
    assert(_concurrent_precleaning == _keep_alive->concurrent_precleaning(),
           "Mismatch");
  }

  void do_void();
};

// A parallel version of CMSDrainMarkingStackClosure above.
class CMSParDrainMarkingStackClosure: public VoidClosure {
  CMSCollector*                 _collector;
  MemRegion                     _span;
  OopTaskQueue*                 _work_queue;
  CMSBitMap*                    _bit_map;
  CMSInnerParMarkAndPushClosure _mark_and_push;

 public:
  CMSParDrainMarkingStackClosure(CMSCollector* collector,
                                 MemRegion span, CMSBitMap* bit_map,
                                 OopTaskQueue* work_queue):
    _collector(collector),
    _span(span),
    _bit_map(bit_map),
    _work_queue(work_queue),
    _mark_and_push(collector, span, bit_map, work_queue) { }

  void trim_queue(uint max);
  void do_void();
};
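// [Illustrative sketch, not in the original header] How the drain closures
// participate in reference processing. The reference processor alternates
// between a keep-alive closure, which marks a referent and pushes it, and a
// drain closure, which transitively completes the marking. The surrounding
// setup below is a simplified assumption (constructor arguments and trailing
// timer/tracer arguments elided):
//
//   // Serial case; CMSParDrainMarkingStackClosure plays the same role when
//   // a task executor is supplied.
//   CMSIsAliveClosure is_alive(...);           // "already marked?" predicate
//   CMSKeepAliveClosure keep_alive(...);       // marks and pushes referents
//   CMSDrainMarkingStackClosure drain(collector, span, bit_map, mark_stack,
//                                     &keep_alive, /*cpc=*/false);
//   rp->process_discovered_references(&is_alive, &keep_alive, &drain,
//                                     NULL /* no task executor */, ...);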
// Allow yielding or short-circuiting of reference list
// precleaning work.
class CMSPrecleanRefsYieldClosure: public YieldClosure {
  CMSCollector* _collector;
  void do_yield_work();
 public:
  CMSPrecleanRefsYieldClosure(CMSCollector* collector):
    _collector(collector) {}
  virtual bool should_return();
};


// Convenience class that acquires the free list locks of the given
// CMS collector for the duration of a scope.
class FreelistLocker: public StackObj {
 private:
  CMSCollector* _collector;
 public:
  FreelistLocker(CMSCollector* collector):
    _collector(collector) {
    _collector->getFreelistLocks();
  }

  ~FreelistLocker() {
    _collector->releaseFreelistLocks();
  }
};

// Mark all dead objects in a given space.
class MarkDeadObjectsClosure: public BlkClosure {
  const CMSCollector*             _collector;
  const CompactibleFreeListSpace* _sp;
  CMSBitMap*                      _live_bit_map;
  CMSBitMap*                      _dead_bit_map;
 public:
  MarkDeadObjectsClosure(const CMSCollector* collector,
                         const CompactibleFreeListSpace* sp,
                         CMSBitMap* live_bit_map,
                         CMSBitMap* dead_bit_map) :
    _collector(collector),
    _sp(sp),
    _live_bit_map(live_bit_map),
    _dead_bit_map(dead_bit_map) {}
  size_t do_blk(HeapWord* addr);
};

class TraceCMSMemoryManagerStats : public TraceMemoryManagerStats {

 public:
  TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause);
};


#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP