/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// ConcurrentMarkSweepGeneration is in support of a concurrent
// mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Shenker
// style. We assume, for now, that this generation is always the
// seniormost generation (modulo the PermGeneration), and for simplicity
// in the first implementation, that this generation is a single compactible
// space. Neither of these restrictions appears essential, and they will be
// relaxed in the future when more time is available to implement the
// greater generality (and there's a need for it).
//
// Concurrent mode failures are currently handled by
// means of a sliding mark-compact.

class CMSAdaptiveSizePolicy;
class CMSConcMarkingTask;
class CMSGCAdaptivePolicyCounters;
class ConcurrentMarkSweepGeneration;
class ConcurrentMarkSweepPolicy;
class ConcurrentMarkSweepThread;
class CompactibleFreeListSpace;
class FreeChunk;
class PromotionInfo;
class ScanMarkedObjectsAgainCarefullyClosure;

// A generic CMS bit map. It's the basis for both the CMS marking bit map
// as well as for the mod union table (in each case only a subset of the
// methods are used). This is essentially a wrapper around the BitMap class,
// with one bit per (1<<_shifter) HeapWords. (i.e. for the marking bit map,
// we have _shifter == 0, and for the mod union table we have
// _shifter == CardTableModRefBS::card_shift - LogHeapWordSize.)
// XXX 64-bit issues in BitMap?
class CMSBitMap VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;

  HeapWord*    _bmStartWord;   // base address of range covered by map
  size_t       _bmWordSize;    // map size (in #HeapWords covered)
  const int    _shifter;       // shifts to convert HeapWord to bit position
  VirtualSpace _virtual_space; // underlying the bit map
  BitMap       _bm;            // the bit map itself
 public:
  Mutex* const _lock;          // mutex protecting _bm;

 public:
  // constructor
  CMSBitMap(int shifter, int mutex_rank, const char* mutex_name);

  // allocates the actual storage for the map
  bool allocate(MemRegion mr);
  // field getter
  Mutex* lock() const { return _lock; }
  // locking verifier convenience function
  void assert_locked() const PRODUCT_RETURN;

  // inquiries
  HeapWord* startWord()   const { return _bmStartWord; }
  size_t    sizeInWords() const { return _bmWordSize; }
  size_t    sizeInBits()  const { return _bm.size(); }
  // the following is one past the last word in space
  HeapWord* endWord()     const { return _bmStartWord + _bmWordSize; }

  // reading marks
  bool isMarked(HeapWord* addr) const;
  bool par_isMarked(HeapWord* addr) const; // do not lock checks
  bool isUnmarked(HeapWord* addr) const;
  bool isAllClear() const;

  // writing marks
  void mark(HeapWord* addr);
  // For marking by parallel GC threads;
  // returns true if we did, false if another thread did
  bool par_mark(HeapWord* addr);

  void mark_range(MemRegion mr);
  void par_mark_range(MemRegion mr);
  void mark_large_range(MemRegion mr);
  void par_mark_large_range(MemRegion mr);
  void par_clear(HeapWord* addr); // For unmarking by parallel GC threads.
  void clear_range(MemRegion mr);
  void par_clear_range(MemRegion mr);
  void clear_large_range(MemRegion mr);
  void par_clear_large_range(MemRegion mr);
  void clear_all();
  void clear_all_incrementally();  // Not yet implemented!!

  NOT_PRODUCT(
    // checks the memory region for validity
    void region_invariant(MemRegion mr);
  )

  // iteration
  void iterate(BitMapClosure* cl) {
    _bm.iterate(cl);
  }
  void iterate(BitMapClosure* cl, HeapWord* left, HeapWord* right);
  void dirty_range_iterate_clear(MemRegionClosure* cl);
  void dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl);

  // auxiliary support for iteration
  HeapWord* getNextMarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextMarkedWordAddress(HeapWord* start_addr,
                                     HeapWord* end_addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* start_addr,
                                       HeapWord* end_addr) const;
  MemRegion getAndClearMarkedRegion(HeapWord* addr);
  MemRegion getAndClearMarkedRegion(HeapWord* start_addr,
                                    HeapWord* end_addr);

  // conversion utilities
  HeapWord* offsetToHeapWord(size_t offset) const;
  size_t    heapWordToOffset(HeapWord* addr) const;
  size_t    heapWordDiffToOffsetDiff(size_t diff) const;

  // debugging
  // is this address range covered by the bit-map?
  NOT_PRODUCT(
    bool covers(MemRegion mr) const;
    bool covers(HeapWord* start, size_t size = 0) const;
  )
  void verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) PRODUCT_RETURN;
};
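
// Illustrative sketch (editorial, not part of the interface above): the
// address <-> bit arithmetic implied by _shifter, assuming the conversion
// utilities are implemented in the obvious way:
//
//   size_t    bit  = pointer_delta(addr, _bmStartWord) >> _shifter;
//   HeapWord* back = _bmStartWord + (bit << _shifter);
//
// With _shifter == 0 (marking bit map) each HeapWord maps to its own bit;
// with _shifter == card_shift - LogHeapWordSize (mod union table) each
// card's worth of HeapWords shares a single bit.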
// Represents a marking stack used by the CMS collector.
// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
class CMSMarkStack: public CHeapObj {
  //
  friend class CMSCollector;   // to get at expansion stats further below
  //

  VirtualSpace _virtual_space;  // space for the stack
  oop*   _base;      // bottom of stack
  size_t _index;     // one more than last occupied index
  size_t _capacity;  // max #elements
  Mutex  _par_lock;  // an advisory lock used in case of parallel access
  NOT_PRODUCT(size_t _max_depth;)  // max depth plumbed during run

 protected:
  size_t _hit_limit;      // we hit max stack size limit
  size_t _failed_double;  // we failed expansion before hitting limit

 public:
  CMSMarkStack():
    _par_lock(Mutex::event, "CMSMarkStack._par_lock", true),
    _hit_limit(0),
    _failed_double(0) {}

  bool allocate(size_t size);

  size_t capacity() const { return _capacity; }

  oop pop() {
    if (!isEmpty()) {
      return _base[--_index];
    }
    return NULL;
  }

  bool push(oop ptr) {
    if (isFull()) {
      return false;
    } else {
      _base[_index++] = ptr;
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
      return true;
    }
  }

  bool isEmpty() const { return _index == 0; }
  bool isFull()  const {
    assert(_index <= _capacity, "buffer overflow");
    return _index == _capacity;
  }

  size_t length() { return _index; }

  // "Parallel versions" of some of the above
  oop par_pop() {
    // lock and pop
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return pop();
  }

  bool par_push(oop ptr) {
    // lock and push
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return push(ptr);
  }

  // Forcibly reset the stack, losing all of its contents.
  void reset() {
    _index = 0;
  }

  // Expand the stack, typically in response to an overflow condition
  void expand();

  // Compute the least valued stack element.
  oop least_value(HeapWord* low) {
    oop least = (oop)low;
    for (size_t i = 0; i < _index; i++) {
      least = MIN2(least, _base[i]);
    }
    return least;
  }

  // Exposed here to allow stack expansion in || case
  Mutex* par_lock() { return &_par_lock; }
};

class CardTableRS;
class CMSParGCThreadState;

class ModUnionClosure: public MemRegionClosure {
 protected:
  CMSBitMap* _t;
 public:
  ModUnionClosure(CMSBitMap* t): _t(t) { }
  void do_MemRegion(MemRegion mr);
};

class ModUnionClosurePar: public ModUnionClosure {
 public:
  ModUnionClosurePar(CMSBitMap* t): ModUnionClosure(t) { }
  void do_MemRegion(MemRegion mr);
};
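
// Illustrative sketch (hypothetical caller, editorial): typical
// overflow-aware use of the CMSMarkStack above during marking;
// "stack_size" is a placeholder for the configured capacity:
//
//   CMSMarkStack stk;
//   if (!stk.allocate(stack_size)) { /* report allocation failure */ }
//   if (!stk.push(obj)) {
//     // Stack is full: the caller records the overflow; the collector
//     // may later call expand() and restart marking from _restart_addr.
//   }
//   while (!stk.isEmpty()) { oop o = stk.pop(); /* scan o */ }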
// Survivor Chunk Array in support of parallelization of
// Survivor Space rescan.
class ChunkArray: public CHeapObj {
  size_t _index;
  size_t _capacity;
  size_t _overflows;
  HeapWord** _array;   // storage for array

 public:
  ChunkArray() : _index(0), _capacity(0), _overflows(0), _array(NULL) {}
  ChunkArray(HeapWord** a, size_t c):
    _index(0), _capacity(c), _overflows(0), _array(a) {}

  HeapWord** array() { return _array; }
  void set_array(HeapWord** a) { _array = a; }

  size_t capacity() { return _capacity; }
  void set_capacity(size_t c) { _capacity = c; }

  size_t end() {
    assert(_index <= capacity(),
           err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT "): out of bounds",
                   _index, _capacity));
    return _index;
  }  // exclusive

  HeapWord* nth(size_t n) {
    assert(n < end(), "Out of bounds access");
    return _array[n];
  }

  void reset() {
    _index = 0;
    if (_overflows > 0 && PrintCMSStatistics > 1) {
      warning("CMS: ChunkArray[" SIZE_FORMAT "] overflowed " SIZE_FORMAT " times",
              _capacity, _overflows);
    }
    _overflows = 0;
  }

  void record_sample(HeapWord* p, size_t sz) {
    // For now we do not do anything with the size
    if (_index < _capacity) {
      _array[_index++] = p;
    } else {
      ++_overflows;
      assert(_index == _capacity,
             err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT
                     "): out of bounds at overflow#" SIZE_FORMAT,
                     _index, _capacity, _overflows));
    }
  }
};

//
// Timing, allocation and promotion statistics for gc scheduling and incremental
// mode pacing. Most statistics are exponential averages.
//
class CMSStats VALUE_OBJ_CLASS_SPEC {
 private:
  ConcurrentMarkSweepGeneration* const _cms_gen;   // The cms (old) gen.

  // The following are exponential averages with factor alpha:
  //   avg = (100 - alpha) * avg + alpha * cur_sample
  //
  //   The durations measure:  end_time[n] - start_time[n]
  //   The periods measure:    start_time[n] - start_time[n-1]
  //
  // The cms period and duration include only concurrent collections; time spent
  // in foreground cms collections due to System.gc() or because of a failure to
  // keep up are not included.
  //
  // There are 3 alphas to "bootstrap" the statistics.  The _saved_alpha is the
  // real value, but is used only after the first period.  A value of 100 is
  // used for the first sample so it gets the entire weight.
  unsigned int _saved_alpha; // 0-100
  unsigned int _gc0_alpha;
  unsigned int _cms_alpha;

  double _gc0_duration;
  double _gc0_period;
  size_t _gc0_promoted;           // bytes promoted per gc0
  double _cms_duration;
  double _cms_duration_pre_sweep; // time from initiation to start of sweep
  double _cms_duration_per_mb;
  double _cms_period;
  size_t _cms_allocated;          // bytes of direct allocation per gc0 period

  // Timers.
  elapsedTimer _cms_timer;
  TimeStamp    _gc0_begin_time;
  TimeStamp    _cms_begin_time;
  TimeStamp    _cms_end_time;

  // Snapshots of the amount used in the CMS generation.
  size_t _cms_used_at_gc0_begin;
  size_t _cms_used_at_gc0_end;
  size_t _cms_used_at_cms_begin;

  // Used to prevent the duty cycle from being reduced in the middle of a cms
  // cycle.
  bool _allow_duty_cycle_reduction;

  enum {
    _GC0_VALID = 0x1,
    _CMS_VALID = 0x2,
    _ALL_VALID = _GC0_VALID | _CMS_VALID
  };

  unsigned int _valid_bits;

  unsigned int _icms_duty_cycle;   // icms duty cycle (0-100).
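
  // Worked example (editorial; assumes the alpha formula above is implicitly
  // normalized by 100): with an alpha of 25, a current average duration of
  // 4.0s and a new sample of 8.0s:
  //   avg = ((100 - 25) * 4.0 + 25 * 8.0) / 100 = 5.0s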
 protected:

  // Return a duty cycle that avoids wild oscillations, by limiting the amount
  // of change between old_duty_cycle and new_duty_cycle (the latter is treated
  // as a recommended value).
  static unsigned int icms_damped_duty_cycle(unsigned int old_duty_cycle,
                                             unsigned int new_duty_cycle);
  unsigned int icms_update_duty_cycle_impl();

  // In support of adjusting of cms trigger ratios based on history
  // of concurrent mode failure.
  double cms_free_adjustment_factor(size_t free) const;
  void   adjust_cms_free_adjustment_factor(bool fail, size_t free);

 public:
  CMSStats(ConcurrentMarkSweepGeneration* cms_gen,
           unsigned int alpha = CMSExpAvgFactor);

  // Whether or not the statistics contain valid data; higher level statistics
  // cannot be called until this returns true (they require at least one young
  // gen and one cms cycle to have completed).
  bool valid() const;

  // Record statistics.
  void record_gc0_begin();
  void record_gc0_end(size_t cms_gen_bytes_used);
  void record_cms_begin();
  void record_cms_end();

  // Allow management of the cms timer, which must be stopped/started around
  // yield points.
  elapsedTimer& cms_timer()     { return _cms_timer; }
  void start_cms_timer()        { _cms_timer.start(); }
  void stop_cms_timer()         { _cms_timer.stop(); }

  // Basic statistics; units are seconds or bytes.
  double gc0_period() const     { return _gc0_period; }
  double gc0_duration() const   { return _gc0_duration; }
  size_t gc0_promoted() const   { return _gc0_promoted; }
  double cms_period() const     { return _cms_period; }
  double cms_duration() const   { return _cms_duration; }
  double cms_duration_per_mb() const { return _cms_duration_per_mb; }
  size_t cms_allocated() const  { return _cms_allocated; }

  size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end;}

  // Seconds since the last background cms cycle began or ended.
  double cms_time_since_begin() const;
  double cms_time_since_end() const;

  // Higher level statistics--caller must check that valid() returns true before
  // calling.

  // Returns bytes promoted per second of wall clock time.
  double promotion_rate() const;

  // Returns bytes directly allocated per second of wall clock time.
  double cms_allocation_rate() const;

  // Rate at which space in the cms generation is being consumed (sum of the
  // above two).
  double cms_consumption_rate() const;

  // Returns an estimate of the number of seconds until the cms generation will
  // fill up, assuming no collection work is done.
  double time_until_cms_gen_full() const;

  // Returns an estimate of the number of seconds remaining until
  // the cms generation collection should start.
  double time_until_cms_start() const;

  // End of higher level statistics.

  // Returns the cms incremental mode duty cycle, as a percentage (0-100).
  unsigned int icms_duty_cycle() const { return _icms_duty_cycle; }

  // Update the duty cycle and return the new value.
  unsigned int icms_update_duty_cycle();
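
  // Illustrative arithmetic (editorial; follows from the comments above, not
  // a verbatim copy of the implementation):
  //   cms_consumption_rate()    ~= promotion_rate() + cms_allocation_rate()
  //   time_until_cms_gen_full() ~= cms_free_bytes / cms_consumption_rate()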
  // Debugging.
  void print_on(outputStream* st) const PRODUCT_RETURN;
  void print() const { print_on(gclog_or_tty); }
};

// A closure related to weak references processing which
// we embed in the CMSCollector, since we need to pass
// it to the reference processor for secondary filtering
// of references based on reachability of referent;
// see role of _is_alive_non_header closure in the
// ReferenceProcessor class.
// For objects in the CMS generation, this closure checks
// if the object is "live" (reachable). Used in weak
// reference processing.
class CMSIsAliveClosure: public BoolObjectClosure {
  const MemRegion  _span;
  const CMSBitMap* _bit_map;

  friend class CMSCollector;
 public:
  CMSIsAliveClosure(MemRegion span,
                    CMSBitMap* bit_map):
    _span(span),
    _bit_map(bit_map) {
    assert(!span.is_empty(), "Empty span could spell trouble");
  }

  void do_object(oop obj) {
    assert(false, "not to be invoked");
  }

  bool do_object_b(oop obj);
};


// Implements AbstractRefProcTaskExecutor for CMS.
class CMSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
 public:

  CMSRefProcTaskExecutor(CMSCollector& collector)
    : _collector(collector)
  { }

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
 private:
  CMSCollector& _collector;
};


class CMSCollector: public CHeapObj {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweepGeneration;
  friend class CompactibleFreeListSpace;
  friend class CMSParRemarkTask;
  friend class CMSConcMarkingTask;
  friend class CMSRefProcTaskProxy;
  friend class CMSRefProcTaskExecutor;
  friend class ScanMarkedObjectsAgainCarefullyClosure;  // for sampling eden
  friend class SurvivorSpacePrecleanClosure;            // --- ditto -------
  friend class PushOrMarkClosure;             // to access _restart_addr
  friend class Par_PushOrMarkClosure;         // to access _restart_addr
  friend class MarkFromRootsClosure;          //  -- ditto --
                                              // ... and for clearing cards
  friend class Par_MarkFromRootsClosure;      // to access _restart_addr
                                              // ... and for clearing cards
  friend class Par_ConcMarkingClosure;        // to access _restart_addr etc.
  friend class MarkFromRootsVerifyClosure;    // to access _restart_addr
  friend class PushAndMarkVerifyClosure;      //  -- ditto --
  friend class MarkRefsIntoAndScanClosure;    // to access _overflow_list
  friend class PushAndMarkClosure;            //  -- ditto --
  friend class Par_PushAndMarkClosure;        //  -- ditto --
  friend class CMSKeepAliveClosure;           //  -- ditto --
  friend class CMSDrainMarkingStackClosure;   //  -- ditto --
  friend class CMSInnerParMarkAndPushClosure; //  -- ditto --
  NOT_PRODUCT(friend class ScanMarkedObjectsAgainClosure;) // assertion on _overflow_list
  friend class ReleaseForegroundGC;  // to access _foregroundGCShouldWait
  friend class VM_CMS_Operation;
  friend class VM_CMS_Initial_Mark;
  friend class VM_CMS_Final_Remark;
  friend class TraceCMSMemoryManagerStats;

 private:
  jlong _time_of_last_gc;
  void update_time_of_last_gc(jlong now) {
    _time_of_last_gc = now;
  }

  OopTaskQueueSet* _task_queues;

  // Overflow list of grey objects, threaded through mark-word
  // Manipulated with CAS in the parallel/multi-threaded case.
  oop  _overflow_list;
  // The following array-pair keeps track of mark words
  // displaced for accommodating overflow list above.
  // This code will likely be revisited under RFE#4922830.
  Stack<oop>     _preserved_oop_stack;
  Stack<markOop> _preserved_mark_stack;

  int* _hash_seed;

  // In support of multi-threaded concurrent phases
  YieldingFlexibleWorkGang* _conc_workers;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Initialization Errors
  bool _completed_initialization;

  // In support of ExplicitGCInvokesConcurrent
  static   bool _full_gc_requested;
  unsigned int  _collection_count_start;

  // Should we unload classes this concurrent cycle?
  bool _should_unload_classes;
  unsigned int  _concurrent_cycles_since_last_unload;
  unsigned int concurrent_cycles_since_last_unload() const {
    return _concurrent_cycles_since_last_unload;
  }
  // Did we (allow) unload classes in the previous concurrent cycle?
  bool unloaded_classes_last_cycle() const {
    return concurrent_cycles_since_last_unload() == 0;
  }
  // Root scanning options for perm gen
  int _roots_scanning_options;
  int roots_scanning_options() const      { return _roots_scanning_options; }
  void add_root_scanning_option(int o)    { _roots_scanning_options |= o;   }
  void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o;  }

  // Verification support
  CMSBitMap     _verification_mark_bm;
  void verify_after_remark_work_1();
  void verify_after_remark_work_2();

  // true if any verification flag is on.
  bool _verifying;
  bool verifying() const { return _verifying; }
  void set_verifying(bool v) { _verifying = v; }

  // Collector policy
  ConcurrentMarkSweepPolicy* _collector_policy;
  ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }

  // XXX Move these to CMSStats ??? FIX ME !!!
  elapsedTimer _inter_sweep_timer;   // time between sweeps
  elapsedTimer _intra_sweep_timer;   // time _in_ sweeps
  // padded decaying average estimates of the above
  AdaptivePaddedAverage _inter_sweep_estimate;
  AdaptivePaddedAverage _intra_sweep_estimate;

 protected:
  ConcurrentMarkSweepGeneration* _cmsGen;  // old gen (CMS)
  ConcurrentMarkSweepGeneration* _permGen; // perm gen
  MemRegion                      _span;    // span covering above two
  CardTableRS*                   _ct;      // card table

  // CMS marking support structures
  CMSBitMap     _markBitMap;
  CMSBitMap     _modUnionTable;
  CMSMarkStack  _markStack;
  CMSMarkStack  _revisitStack;  // used to keep track of klassKlass objects
                                // to revisit
  CMSBitMap     _perm_gen_verify_bit_map; // Mark bit map for perm gen verification support.

  HeapWord*     _restart_addr;  // in support of marking stack overflow
  void          lower_restart_addr(HeapWord* low);
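
  // Note (editorial summary, not normative): on marking stack overflow an
  // object stays marked in _markBitMap while _restart_addr is lowered to its
  // address via lower_restart_addr(); marking later rescans upward from
  // _restart_addr, so overflowed grey objects are not lost.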
  // Counters in support of marking stack / work queue overflow handling:
  // a non-zero value indicates certain types of overflow events during
  // the current CMS cycle and could lead to stack resizing efforts at
  // an opportune future time.
  size_t        _ser_pmc_preclean_ovflw;
  size_t        _ser_pmc_remark_ovflw;
  size_t        _par_pmc_remark_ovflw;
  size_t        _ser_kac_preclean_ovflw;
  size_t        _ser_kac_ovflw;
  size_t        _par_kac_ovflw;
  NOT_PRODUCT(ssize_t _num_par_pushes;)

  // ("Weak") Reference processing support
  ReferenceProcessor* _ref_processor;
  CMSIsAliveClosure   _is_alive_closure;
  // keep this textually after _markBitMap and _span; c'tor dependency

  ConcurrentMarkSweepThread* _cmsThread;  // the thread doing the work
  ModUnionClosure    _modUnionClosure;
  ModUnionClosurePar _modUnionClosurePar;

  // CMS abstract state machine
  // initial_state: Idling
  // next_state(Idling)            = {Marking}
  // next_state(Marking)           = {Precleaning, Sweeping}
  // next_state(Precleaning)       = {AbortablePreclean, FinalMarking}
  // next_state(AbortablePreclean) = {FinalMarking}
  // next_state(FinalMarking)      = {Sweeping}
  // next_state(Sweeping)          = {Resizing}
  // next_state(Resizing)          = {Resetting}
  // next_state(Resetting)         = {Idling}
  // The numeric values below are chosen so that:
  //   . _collectorState <= Idling == post-sweep && pre-mark
  //   . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
  //                                              precleaning || abortablePreclean
 public:
  enum CollectorState {
    Resizing            = 0,
    Resetting           = 1,
    Idling              = 2,
    InitialMarking      = 3,
    Marking             = 4,
    Precleaning         = 5,
    AbortablePreclean   = 6,
    FinalMarking        = 7,
    Sweeping            = 8
  };
 protected:
  static CollectorState _collectorState;

  // State related to prologue/epilogue invocation for my generations
  bool _between_prologue_and_epilogue;

  // Signalling/State related to coordination between fore- and background GC
  // Note: When the baton has been passed from background GC to foreground GC,
  // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
  static bool _foregroundGCIsActive;    // true iff foreground collector is active or
                                        // wants to go active
  static bool _foregroundGCShouldWait;  // true iff background GC is active and has not
                                        // yet passed the baton to the foreground GC

  // Support for CMSScheduleRemark (abortable preclean)
  bool _abort_preclean;
  bool _start_sampling;

  int    _numYields;
  size_t _numDirtyCards;
  size_t _sweep_count;
  // number of full gc's since the last concurrent gc.
  uint   _full_gcs_since_conc_gc;

  // occupancy used for bootstrapping stats
  double _bootstrap_occupancy;

  // timer
  elapsedTimer _timer;

  // Timing, allocation and promotion statistics, used for scheduling.
  CMSStats _stats;

  // Allocation limits installed in the young gen, used only in
  // CMSIncrementalMode.  When an allocation in the young gen would cross one of
  // these limits, the cms generation is notified and the cms thread is started
  // or stopped, respectively.
  HeapWord* _icms_start_limit;
  HeapWord* _icms_stop_limit;

  enum CMS_op_type {
    CMS_op_checkpointRootsInitial,
    CMS_op_checkpointRootsFinal
  };

  void do_CMS_operation(CMS_op_type op);
  bool stop_world_and_do(CMS_op_type op);

  OopTaskQueueSet* task_queues()    { return _task_queues; }
  int*             hash_seed(int i) { return &_hash_seed[i]; }
  YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; }

  // Support for parallelizing Eden rescan in CMS remark phase
  void sample_eden(); // ... sample Eden space top
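
  // Illustrative sketch (editorial; see the _eden_chunk_array fields just
  // below): the sampled Eden tops partition rescan work, so that parallel
  // worker i handles roughly
  //   [_eden_chunk_array[i], _eden_chunk_array[i+1])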
 private:
  // Support for parallelizing young gen rescan in CMS remark phase
  Generation* _young_gen;       // the younger gen
  HeapWord**  _top_addr;        // ... Top of Eden
  HeapWord**  _end_addr;        // ... End of Eden
  HeapWord**  _eden_chunk_array;    // ... Eden partitioning array
  size_t      _eden_chunk_index;    // ... top (exclusive) of array
  size_t      _eden_chunk_capacity; // ... max entries in array

  // Support for parallelizing survivor space rescan
  HeapWord**  _survivor_chunk_array;
  size_t      _survivor_chunk_index;
  size_t      _survivor_chunk_capacity;
  size_t*     _cursor;
  ChunkArray* _survivor_plab_array;

  // Support for marking stack overflow handling
  bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
  bool par_take_from_overflow_list(size_t num,
                                   OopTaskQueue* to_work_q,
                                   int no_of_gc_threads);
  void push_on_overflow_list(oop p);
  void par_push_on_overflow_list(oop p);
  // the following is, obviously, not, in general, "MT-stable"
  bool overflow_list_is_empty() const;

  void preserve_mark_if_necessary(oop p);
  void par_preserve_mark_if_necessary(oop p);
  void preserve_mark_work(oop p, markOop m);
  void restore_preserved_marks_if_any();
  NOT_PRODUCT(bool no_preserved_marks() const;)
  // in support of testing overflow code
  NOT_PRODUCT(int _overflow_counter;)
  NOT_PRODUCT(bool simulate_overflow();)      // sequential
  NOT_PRODUCT(bool par_simulate_overflow();)  // MT version

  // CMS work methods
  void checkpointRootsInitialWork(bool asynch); // initial checkpoint work

  // a return value of false indicates failure due to stack overflow
  bool markFromRootsWork(bool asynch);  // concurrent marking work

 public:   // FIX ME!!! only for testing
  bool do_marking_st(bool asynch);      // single-threaded marking
  bool do_marking_mt(bool asynch);      // multi-threaded  marking

 private:

  // concurrent precleaning work
  size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
                                  ScanMarkedObjectsAgainCarefullyClosure* cl);
  size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
                             ScanMarkedObjectsAgainCarefullyClosure* cl);
  // Does precleaning work, returning a quantity indicative of
  // the amount of "useful work" done.
  size_t preclean_work(bool clean_refs, bool clean_survivors);
  void abortable_preclean(); // Preclean while looking for possible abort
  void initialize_sequential_subtasks_for_young_gen_rescan(int i);
  // Helper function for above; merge-sorts the per-thread plab samples
  void merge_survivor_plab_arrays(ContiguousSpace* surv, int no_of_gc_threads);
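
  // Illustrative sketch (editorial; the loop structure and cut-off shown
  // here are assumptions, not the verbatim implementation):
  //   while (!should_abort_preclean()) {
  //     size_t work = preclean_work(clean_refs, clean_survivors);
  //     if (work < some_minimum) break;  // too little useful work left
  //   }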
  // Resets (i.e. clears) the per-thread plab sample vectors
  void reset_survivor_plab_arrays();

  // final (second) checkpoint work
  void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
                                bool init_mark_was_synchronous);
  // work routine for parallel version of remark
  void do_remark_parallel();
  // work routine for non-parallel version of remark
  void do_remark_non_parallel();
  // reference processing work routine (during second checkpoint)
  void refProcessingWork(bool asynch, bool clear_all_soft_refs);

  // concurrent sweeping work
  void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);

  // (concurrent) resetting of support data structures
  void reset(bool asynch);

  // Clear _expansion_cause fields of constituent generations
  void clear_expansion_cause();

  // An auxiliary method used to record the ends of
  // used regions of each generation to limit the extent of sweep
  void save_sweep_limits();

  // Resize the generations included in the collector.
  void compute_new_size();

  // A work method used by foreground collection to determine
  // what type of collection (compacting or not, continuing or fresh)
  // it should do.
  void decide_foreground_collection_type(bool clear_all_soft_refs,
                                         bool* should_compact, bool* should_start_over);

  // A work method used by the foreground collector to do
  // a mark-sweep-compact.
  void do_compaction_work(bool clear_all_soft_refs);

  // A work method used by the foreground collector to do
  // a mark-sweep, after taking over from a possibly on-going
  // concurrent mark-sweep collection.
  void do_mark_sweep_work(bool clear_all_soft_refs,
                          CollectorState first_state, bool should_start_over);

  // If the background GC is active, acquire control from the background
  // GC and do the collection.
  void acquire_control_and_collect(bool full, bool clear_all_soft_refs);

  // For synchronizing passing of control from background to foreground
  // GC.  waitForForegroundGC() is called by the background
  // collector.  If it had to wait for a foreground collection,
  // it returns true and the background collection should assume
  // that the collection was finished by the foreground
  // collector.
  bool waitForForegroundGC();

  // Incremental mode triggering:  recompute the icms duty cycle and set the
  // allocation limits in the young gen.
  void icms_update_allocation_limits();

  size_t block_size_using_printezis_bits(HeapWord* addr) const;
  size_t block_size_if_printezis_bits(HeapWord* addr) const;
  HeapWord* next_card_start_after_block(HeapWord* addr) const;

  void setup_cms_unloading_and_verification_state();
 public:
  CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
               ConcurrentMarkSweepGeneration* permGen,
               CardTableRS*                   ct,
               ConcurrentMarkSweepPolicy*     cp);
  ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }

  ReferenceProcessor* ref_processor() { return _ref_processor; }
  void ref_processor_init();

  Mutex* bitMapLock() const { return _markBitMap.lock(); }
  static CollectorState abstract_state() { return _collectorState; }

  bool should_abort_preclean() const; // Whether preclean should be aborted.
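
  // Note (editorial summary; the exact flag is an assumption): abortable
  // preclean is typically abandoned once Eden occupancy crosses a threshold
  // such as the CMSScheduleRemarkEdenPenetration percentage of
  // get_eden_capacity(), so that remark tends to run shortly after a
  // scavenge, when Eden is relatively empty.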
  size_t get_eden_used() const;
  size_t get_eden_capacity() const;

  ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }

  // locking checks
  NOT_PRODUCT(static bool have_cms_token();)

  // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
  bool shouldConcurrentCollect();

  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);
  void collect_in_background(bool clear_all_soft_refs);
  void collect_in_foreground(bool clear_all_soft_refs);

  // In support of ExplicitGCInvokesConcurrent
  static void request_full_gc(unsigned int full_gc_count);
  // Should we unload classes in a particular concurrent cycle?
  bool should_unload_classes() const {
    return _should_unload_classes;
  }
  bool update_should_unload_classes();

  void direct_allocated(HeapWord* start, size_t size);

  // Object is dead if not marked and current phase is sweeping.
  bool is_dead_obj(oop obj) const;

  // After a promotion (of "start"), do any necessary marking.
  // If "par", then it's being done by a parallel GC thread.
  // The last two args indicate if we need precise marking
  // and if so the size of the object so it can be dirtied
  // in its entirety.
  void promoted(bool par, HeapWord* start,
                bool is_obj_array, size_t obj_size);

  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                     size_t word_size);

  void getFreelistLocks() const;
  void releaseFreelistLocks() const;
  bool haveFreelistLocks() const;

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_epilogue(bool full);

  jlong time_of_last_gc(jlong now) {
    if (_collectorState <= Idling) {
      // gc not in progress
      return _time_of_last_gc;
    } else {
      // collection in progress
      return now;
    }
  }

  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num);

  CMSBitMap* markBitMap()  { return &_markBitMap; }
  void directAllocated(HeapWord* start, size_t size);

  // main CMS steps and related support
  void checkpointRootsInitial(bool asynch);
  bool markFromRoots(bool asynch);  // a return value of false indicates failure
                                    // due to stack overflow
  void preclean();
  void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs,
                            bool init_mark_was_synchronous);
  void sweep(bool asynch);
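
  // Illustrative sketch (editorial, simplified): a background cycle drives
  // the steps above in the order given by the CollectorState machine:
  //   checkpointRootsInitial(true);      // stop-the-world initial mark
  //   markFromRoots(true);               // concurrent mark
  //   preclean();                        // concurrent (abortable) preclean
  //   checkpointRootsFinal(true, ...);   // stop-the-world remark
  //   sweep(true);                       // concurrent sweep
  //   reset(true);                       // concurrent reset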
  // Check that the currently executing thread is the expected
  // one (foreground collector or background collector).
  static void check_correct_thread_executing() PRODUCT_RETURN;
  // XXXPERM void print_statistics() PRODUCT_RETURN;

  bool is_cms_reachable(HeapWord* addr);

  // Performance Counter Support
  CollectorCounters* counters() { return _gc_counters; }

  // timer stuff
  void    startTimer() { assert(!_timer.is_active(), "Error"); _timer.start();   }
  void    stopTimer()  { assert( _timer.is_active(), "Error"); _timer.stop();    }
  void    resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset();   }
  double  timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); }

  int  yields()          { return _numYields; }
  void resetYields()     { _numYields = 0;    }
  void incrementYields() { _numYields++;      }
  void resetNumDirtyCards()               { _numDirtyCards = 0; }
  void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; }
  size_t numDirtyCards()                  { return _numDirtyCards; }

  static bool foregroundGCShouldWait() { return _foregroundGCShouldWait; }
  static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; }
  static bool foregroundGCIsActive() { return _foregroundGCIsActive; }
  static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; }
  size_t sweep_count() const     { return _sweep_count; }
  void   increment_sweep_count() { _sweep_count++; }

  // Timers/stats for gc scheduling and incremental mode pacing.
  CMSStats& stats() { return _stats; }

  // Convenience methods that check whether CMSIncrementalMode is enabled and
  // forward to the corresponding methods in ConcurrentMarkSweepThread.
  static void start_icms();
  static void stop_icms();    // Called at the end of the cms cycle.
  static void disable_icms(); // Called before a foreground collection.
  static void enable_icms();  // Called after a foreground collection.
  void icms_wait();           // Called at yield points.

  // Adaptive size policy
  CMSAdaptiveSizePolicy* size_policy();
  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();

  // debugging
  void verify(bool);
  bool verify_after_remark();
  void verify_ok_to_terminate() const PRODUCT_RETURN;
  void verify_work_stacks_empty() const PRODUCT_RETURN;
  void verify_overflow_empty() const PRODUCT_RETURN;

  // convenience methods in support of debugging
  static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
  HeapWord* block_start(const void* p) const PRODUCT_RETURN0;

  // accessors
  CMSMarkStack* verification_mark_stack() { return &_markStack; }
  CMSBitMap*    verification_mark_bm()    { return &_verification_mark_bm; }

  // Get the bit map with a perm gen "deadness" information.
  CMSBitMap* perm_gen_verify_bit_map() { return &_perm_gen_verify_bit_map; }

  // Initialization errors
  bool completed_initialization() { return _completed_initialization; }
};

class CMSExpansionCause : public AllStatic {
 public:
  enum Cause {
    _no_expansion,
    _satisfy_free_ratio,
    _satisfy_promotion,
    _satisfy_allocation,
    _allocate_par_lab,
    _allocate_par_spooling_space,
    _adaptive_size_policy
  };
  // Return a string describing the cause of the expansion.
  static const char* to_string(CMSExpansionCause::Cause cause);
};

class ConcurrentMarkSweepGeneration: public CardGeneration {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweep;
  friend class CMSCollector;
 protected:
  static CMSCollector*      _collector; // the collector that collects us
  CompactibleFreeListSpace* _cmsSpace;  // underlying space (only one for now)

  // Performance Counters
  GenerationCounters* _gen_counters;
  GSpaceCounters*     _space_counters;

  // Words directly allocated, used by CMSStats.
  size_t _direct_allocated_words;

  // Non-product stat counters
  NOT_PRODUCT(
    size_t _numObjectsPromoted;
    size_t _numWordsPromoted;
    size_t _numObjectsAllocated;
    size_t _numWordsAllocated;
  )

  // Used for sizing decisions
  bool _incremental_collection_failed;
  bool incremental_collection_failed() {
    return _incremental_collection_failed;
  }
  void set_incremental_collection_failed() {
    _incremental_collection_failed = true;
  }
  void clear_incremental_collection_failed() {
    _incremental_collection_failed = false;
  }

  // accessors
  void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
  CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; }

 private:
  // For parallel young-gen GC support.
  CMSParGCThreadState** _par_gc_thread_states;

  // Reason generation was expanded
  CMSExpansionCause::Cause _expansion_cause;

  // In support of MinChunkSize being larger than min object size
  const double _dilatation_factor;

  enum CollectionTypes {
    Concurrent_collection_type     = 0,
    MS_foreground_collection_type  = 1,
    MSC_foreground_collection_type = 2,
    Unknown_collection_type        = 3
  };

  CollectionTypes _debug_collection_type;

  // Fraction of current occupancy at which to start a CMS collection which
  // will collect this generation (at least).
  double _initiating_occupancy;

 protected:
  // Shrink generation by specified size (returns false if unable to shrink)
  virtual void shrink_by(size_t bytes);

  // Update statistics for GC
  virtual void update_gc_stats(int level, bool full);

  // Maximum available space in the generation (including uncommitted)
  // space.
  size_t max_available() const;
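
  // Illustrative sketch (editorial; the io < 0 fallback shown here is an
  // assumption about init_initiating_occupancy() below, with io typically
  // CMSInitiatingOccupancyFraction and tr CMSTriggerRatio):
  //   if (io >= 0)  _initiating_occupancy = io / 100.0;
  //   else          _initiating_occupancy =
  //     ((100 - MinHeapFreeRatio) + tr * MinHeapFreeRatio / 100.0) / 100.0;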
  // getter and initializer for _initiating_occupancy field.
  double initiating_occupancy() const { return _initiating_occupancy; }
  void   init_initiating_occupancy(intx io, intx tr);

 public:
  ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                int level, CardTableRS* ct,
                                bool use_adaptive_freelists,
                                FreeBlockDictionary::DictionaryChoice);

  // Accessors
  CMSCollector* collector() const { return _collector; }
  static void set_collector(CMSCollector* collector) {
    assert(_collector == NULL, "already set");
    _collector = collector;
  }
  CompactibleFreeListSpace* cmsSpace() const { return _cmsSpace; }

  Mutex* freelistLock() const;

  virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }

  // Adaptive size policy
  CMSAdaptiveSizePolicy* size_policy();

  bool refs_discovery_is_atomic() const { return false; }
  bool refs_discovery_is_mt()     const {
    // Note: CMS does MT-discovery during the parallel-remark
    // phases. Use ReferenceProcessorMTMutator to make refs
    // discovery MT-safe during such phases or other parallel
    // discovery phases in the future. This may all go away
    // if/when we decide that refs discovery is sufficiently
    // rare that the cost of the CAS's involved is in the
    // noise. That's a measurement that should be done, and
    // the code simplified if that turns out to be the case.
    return false;
  }

  // Override
  virtual void ref_processor_init();

  // Grow generation by specified size (returns false if unable to grow)
  bool grow_by(size_t bytes);
  // Grow generation to reserved size.
  bool grow_to_reserved();

  void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;
  double occupancy() const { return ((double)used())/((double)capacity()); }
  size_t contiguous_available() const;
  size_t unsafe_max_alloc_nogc() const;

  // over-rides
  MemRegion used_region() const;
  MemRegion used_region_at_save_marks() const;

  // Does a "full" (forced) collection invoked on this generation collect
  // all younger generations as well? Note that the second conjunct is a
  // hack to allow the collection of the younger gen first if the flag is
  // set. This is better than using the policy's should_collect_gen0_first()
  // since that causes us to do an extra unnecessary pair of restart-&-stop-world.
  virtual bool full_collects_younger_generations() const {
    return UseCMSCompactAtFullCollection && !CollectGen0First;
  }

  void space_iterate(SpaceClosure* blk, bool usedOnly = false);

  // Support for compaction
  CompactibleSpace* first_compaction_space() const;
  // Adjust quantities in the generation affected by
  // the compaction.
  void reset_after_compaction();

  // Allocation support
  HeapWord* allocate(size_t size, bool tlab);
  HeapWord* have_lock_and_allocate(size_t size, bool tlab);
  oop       promote(oop obj, size_t obj_size);
  HeapWord* par_allocate(size_t size, bool tlab) {
    return allocate(size, tlab);
  }

  // Incremental mode triggering.
  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                     size_t word_size);

  // Used by CMSStats to track direct allocation.  The value is sampled and
  // reset after each young gen collection.
  size_t direct_allocated_words() const { return _direct_allocated_words; }
  void reset_direct_allocated_words()   { _direct_allocated_words = 0; }

  // Overrides for parallel promotion.
  virtual oop par_promote(int thread_num,
                          oop obj, markOop m, size_t word_sz);
  // This one should not be called for CMS.
  virtual void par_promote_alloc_undo(int thread_num,
                                      HeapWord* obj, size_t word_sz);
  virtual void par_promote_alloc_done(int thread_num);
  virtual void par_oop_since_save_marks_iterate_done(int thread_num);

  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
    bool younger_handles_promotion_failure) const;

  // Inform this (non-young) generation that a promotion failure was
  // encountered during a collection of a younger generation that
  // promotes into this generation.
  virtual void promotion_failure_occurred();

  bool should_collect(bool full, size_t size, bool tlab);
  virtual bool should_concurrent_collect() const;
  virtual bool is_too_full() const;
  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);

  HeapWord* expand_and_allocate(size_t word_size,
                                bool tlab,
                                bool parallel = false);

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_prologue_work(bool full, bool registerClosure,
                        ModUnionClosure* modUnionClosure);
  void gc_epilogue(bool full);
  void gc_epilogue_work(bool full);

  // Time since last GC of this generation
  jlong time_of_last_gc(jlong now) {
    return collector()->time_of_last_gc(now);
  }
  void update_time_of_last_gc(jlong now) {
    collector()->update_time_of_last_gc(now);
  }

  // Allocation failure
  void expand(size_t bytes, size_t expand_bytes,
              CMSExpansionCause::Cause cause);
  virtual bool expand(size_t bytes, size_t expand_bytes);
  void shrink(size_t bytes);
  HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
  bool expand_and_ensure_spooling_space(PromotionInfo* promo);

  // Iteration support and related enquiries
  void save_marks();
  bool no_allocs_since_save_marks();
  void object_iterate_since_last_GC(ObjectClosure* cl);
  void younger_refs_iterate(OopsInGenClosure* cl);

  // Iteration support specific to CMS generations
  void save_sweep_limit();

  // More iteration support
  virtual void oop_iterate(MemRegion mr, OopClosure* cl);
  virtual void oop_iterate(OopClosure* cl);
  virtual void safe_object_iterate(ObjectClosure* cl);
  virtual void object_iterate(ObjectClosure* cl);

  // Need to declare the full complement of closures, whether we'll
  // override them or not, or get message from the compiler:
  //   oop_since_save_marks_iterate_nv hides virtual function...
  #define CMS_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
    void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DECL)

  // Smart allocation  XXX -- move to CFLSpace?
  void setNearLargestChunk();
  bool isNearLargestChunk(HeapWord* addr);

  // Get the chunk at the end of the space.  Delegates to
  // the space.
  FreeChunk* find_chunk_at_end();

  // Overriding of unused functionality (sharing not yet supported with CMS)
  void pre_adjust_pointers();
  void post_compact();

  // Debugging
  void prepare_for_verify();
  void verify(bool allow_dirty);
  void print_statistics() PRODUCT_RETURN;

  // Performance Counters support
  virtual void update_counters();
  virtual void update_counters(size_t used);
  void initialize_performance_counters();
  CollectorCounters* counters() { return collector()->counters(); }

  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num) {
    // Delegate to collector
    return collector()->get_data_recorder(thr_num);
  }

  // Printing
  const char* name() const;
  virtual const char* short_name() const { return "CMS"; }
  void print() const;
  void printOccupancy(const char* s);
  bool must_be_youngest() const { return false; }
  bool must_be_oldest()   const { return true; }

  void compute_new_size();

  CollectionTypes debug_collection_type() { return _debug_collection_type; }
  void rotate_debug_collection_type();
};

class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration {

  // Return the size policy from the heap's collector
  // policy cast to CMSAdaptiveSizePolicy*.
  CMSAdaptiveSizePolicy* cms_size_policy() const;

  // Resize the generation based on the adaptive size
  // policy.
  void resize(size_t cur_promo, size_t desired_promo);

  // Return the GC counters from the collector policy
  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();

  virtual void shrink_by(size_t bytes);

 public:
  virtual void compute_new_size();
  ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                  int level, CardTableRS* ct,
                                  bool use_adaptive_freelists,
                                  FreeBlockDictionary::DictionaryChoice
                                    dictionaryChoice) :
    ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct,
      use_adaptive_freelists, dictionaryChoice) {}

  virtual const char* short_name() const { return "ASCMS"; }
  virtual Generation::Name kind() { return Generation::ASConcurrentMarkSweep; }

  virtual void update_counters();
  virtual void update_counters(size_t used);
};

//
// Closures of various sorts used by CMS to accomplish its work
//

// This closure is used to check that a certain set of oops is empty.
class FalseClosure: public OopClosure {
 public:
  void do_oop(oop* p)       { guarantee(false, "Should be an empty set"); }
  void do_oop(narrowOop* p) { guarantee(false, "Should be an empty set"); }
};
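
// Illustrative sketch (hypothetical driver, editorial): a BitMapClosure such
// as the MarkFromRootsClosure below is applied to the marking bit map over
// the span being scanned, e.g.
//
//   MarkFromRootsClosure cl(this, _span, &_markBitMap, &_markStack,
//                           &_revisitStack, true /* should_yield */);
//   _markBitMap.iterate(&cl, start_addr, _span.end());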
// This closure is used to do concurrent marking from the roots
// following the first checkpoint.
class MarkFromRootsClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _bitMap;
  CMSBitMap*     _mut;
  CMSMarkStack*  _markStack;
  CMSMarkStack*  _revisitStack;
  bool           _yield;
  int            _skipBits;
  HeapWord*      _finger;
  HeapWord*      _threshold;
  DEBUG_ONLY(bool _verifying;)

 public:
  MarkFromRootsClosure(CMSCollector* collector, MemRegion span,
                       CMSBitMap* bitMap,
                       CMSMarkStack* markStack,
                       CMSMarkStack* revisitStack,
                       bool should_yield, bool verifying = false);
  bool do_bit(size_t offset);
  void reset(HeapWord* addr);
  inline void do_yield_check();

 private:
  void scanOopsInOop(HeapWord* ptr);
  void do_yield_work();
};

// This closure is used to do concurrent multi-threaded
// marking from the roots following the first checkpoint.
// XXX This should really be a subclass of the serial version
// above, but I have not had the time to refactor things cleanly.
// That will be done for Dolphin.
class Par_MarkFromRootsClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _whole_span;
  MemRegion      _span;
  CMSBitMap*     _bit_map;
  CMSBitMap*     _mut;
  OopTaskQueue*  _work_queue;
  CMSMarkStack*  _overflow_stack;
  CMSMarkStack*  _revisit_stack;
  bool           _yield;
  int            _skip_bits;
  HeapWord*      _finger;
  HeapWord*      _threshold;
  CMSConcMarkingTask* _task;
 public:
  Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
                           MemRegion span,
                           CMSBitMap* bit_map,
                           OopTaskQueue* work_queue,
                           CMSMarkStack* overflow_stack,
                           CMSMarkStack* revisit_stack,
                           bool should_yield);
  bool do_bit(size_t offset);
  inline void do_yield_check();

 private:
  void scan_oops_in_oop(HeapWord* ptr);
  void do_yield_work();
  bool get_work_from_overflow_stack();
};

// The following closures are used to do certain kinds of verification of
// CMS marking.
class PushAndMarkVerifyClosure: public OopClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _verification_bm;
  CMSBitMap*     _cms_bm;
  CMSMarkStack*  _mark_stack;
 protected:
  void do_oop(oop p);
  template <class T> inline void do_oop_work(T *p) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    do_oop(obj);
  }
 public:
  PushAndMarkVerifyClosure(CMSCollector* cms_collector,
                           MemRegion span,
                           CMSBitMap* verification_bm,
                           CMSBitMap* cms_bm,
                           CMSMarkStack* mark_stack);
  void do_oop(oop* p);
  void do_oop(narrowOop* p);
  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
};

class MarkFromRootsVerifyClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _verification_bm;
  CMSBitMap*     _cms_bm;
  CMSMarkStack*  _mark_stack;
  HeapWord*      _finger;
  PushAndMarkVerifyClosure _pam_verify_closure;
 public:
  MarkFromRootsVerifyClosure(CMSCollector* collector, MemRegion span,
                             CMSBitMap* verification_bm,
                             CMSBitMap* cms_bm,
                             CMSMarkStack* mark_stack);
  bool do_bit(size_t offset);
  void reset(HeapWord* addr);
};


// This closure is used to check that a certain set of bits is
// "empty" (i.e. the bit vector doesn't have any 1-bits).
class FalseBitMapClosure: public BitMapClosure {
 public:
  bool do_bit(size_t offset) {
    guarantee(false, "Should not have a 1 bit");
    return true;
  }
};

// This closure is used during the second checkpointing phase
// to rescan the marked objects on the dirty cards in the mod
// union table and the card table proper. It's invoked via
// MarkFromDirtyCardsClosure below. It uses either
// [Par_]MarkRefsIntoAndScanClosure (Par_ in the parallel case)
// declared in genOopClosures.hpp to accomplish some of its work.
// In the parallel case the bitMap is shared, so access to
// it needs to be suitably synchronized for updates by embedded
// closures that update it; however, this closure itself only
// reads the bit_map and because it is idempotent, is immune to
// reading stale values.
class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure {
  #ifdef ASSERT
    CMSCollector*   _collector;
    MemRegion       _span;
    union {
      CMSMarkStack* _mark_stack;
      OopTaskQueue* _work_queue;
    };
  #endif // ASSERT
  bool       _parallel;
  CMSBitMap* _bit_map;
  union {
    MarkRefsIntoAndScanClosure*     _scan_closure;
    Par_MarkRefsIntoAndScanClosure* _par_scan_closure;
  };

 public:
  ScanMarkedObjectsAgainClosure(CMSCollector* collector,
                                MemRegion span,
                                ReferenceProcessor* rp,
                                CMSBitMap* bit_map,
                                CMSMarkStack* mark_stack,
                                CMSMarkStack* revisit_stack,
                                MarkRefsIntoAndScanClosure* cl):
    #ifdef ASSERT
      _collector(collector),
      _span(span),
      _mark_stack(mark_stack),
    #endif // ASSERT
    _parallel(false),
    _bit_map(bit_map),
    _scan_closure(cl) { }

  ScanMarkedObjectsAgainClosure(CMSCollector* collector,
                                MemRegion span,
                                ReferenceProcessor* rp,
                                CMSBitMap* bit_map,
                                OopTaskQueue* work_queue,
                                CMSMarkStack* revisit_stack,
                                Par_MarkRefsIntoAndScanClosure* cl):
    #ifdef ASSERT
      _collector(collector),
      _span(span),
      _work_queue(work_queue),
    #endif // ASSERT
    _parallel(true),
    _bit_map(bit_map),
    _par_scan_closure(cl) { }

  void do_object(oop obj) {
    guarantee(false, "Call do_object_b(oop, MemRegion) instead");
  }
  bool do_object_b(oop obj) {
    guarantee(false, "Call do_object_b(oop, MemRegion) form instead");
    return false;
  }
  bool do_object_bm(oop p, MemRegion mr);
};

// This closure is used during the second checkpointing phase
// to rescan the marked objects on the dirty cards in the mod
// union table and the card table proper. It invokes
// ScanMarkedObjectsAgainClosure above to accomplish much of its work.
// In the parallel case, the bit map is shared and requires
// synchronized access.
class MarkFromDirtyCardsClosure: public MemRegionClosure {
  CompactibleFreeListSpace*     _space;
  ScanMarkedObjectsAgainClosure _scan_cl;
  size_t                        _num_dirty_cards;

 public:
  MarkFromDirtyCardsClosure(CMSCollector* collector,
                            MemRegion span,
                            CompactibleFreeListSpace* space,
                            CMSBitMap* bit_map,
                            CMSMarkStack* mark_stack,
                            CMSMarkStack* revisit_stack,
                            MarkRefsIntoAndScanClosure* cl):
    _space(space),
    _num_dirty_cards(0),
    _scan_cl(collector, span, collector->ref_processor(), bit_map,
             mark_stack, revisit_stack, cl) { }

  MarkFromDirtyCardsClosure(CMSCollector* collector,
                            MemRegion span,
                            CompactibleFreeListSpace* space,
                            CMSBitMap* bit_map,
                            OopTaskQueue* work_queue,
                            CMSMarkStack* revisit_stack,
                            Par_MarkRefsIntoAndScanClosure* cl):
    _space(space),
    _num_dirty_cards(0),
    _scan_cl(collector, span, collector->ref_processor(), bit_map,
             work_queue, revisit_stack, cl) { }

  void do_MemRegion(MemRegion mr);
  void set_space(CompactibleFreeListSpace* space) { _space = space; }
  size_t num_dirty_cards() { return _num_dirty_cards; }
};

// This closure is used in the non-product build to check
// that there are no MemRegions with a certain property.
class FalseMemRegionClosure: public MemRegionClosure {
  void do_MemRegion(MemRegion mr) {
    guarantee(!mr.is_empty(), "Shouldn't be empty");
    guarantee(false, "Should never be here");
  }
};

// This closure is used during the precleaning phase
// to "carefully" rescan marked objects on dirty cards.
// It uses MarkRefsIntoAndScanClosure declared in genOopClosures.hpp
// to accomplish some of its work.
class ScanMarkedObjectsAgainCarefullyClosure: public ObjectClosureCareful {
  CMSCollector*               _collector;
  MemRegion                   _span;
  bool                        _yield;
  Mutex*                      _freelistLock;
  CMSBitMap*                  _bitMap;
  CMSMarkStack*               _markStack;
  MarkRefsIntoAndScanClosure* _scanningClosure;

 public:
  ScanMarkedObjectsAgainCarefullyClosure(CMSCollector* collector,
                                         MemRegion     span,
                                         CMSBitMap* bitMap,
                                         CMSMarkStack* markStack,
                                         CMSMarkStack* revisitStack,
                                         MarkRefsIntoAndScanClosure* cl,
                                         bool should_yield):
    _collector(collector),
    _span(span),
    _yield(should_yield),
    _bitMap(bitMap),
    _markStack(markStack),
    _scanningClosure(cl) {
  }

  void do_object(oop p) {
    guarantee(false, "call do_object_careful instead");
  }

  size_t do_object_careful(oop p) {
    guarantee(false, "Unexpected caller");
    return 0;
  }

  size_t do_object_careful_m(oop p, MemRegion mr);

  void setFreelistLock(Mutex* m) {
    _freelistLock = m;
    _scanningClosure->set_freelistLock(m);
  }

 private:
  inline bool do_yield_check();

  void do_yield_work();
};

class SurvivorSpacePrecleanClosure: public ObjectClosureCareful {
  CMSCollector*       _collector;
  MemRegion           _span;
  bool                _yield;
  CMSBitMap*          _bit_map;
  CMSMarkStack*       _mark_stack;
  PushAndMarkClosure* _scanning_closure;
  unsigned int        _before_count;

 public:
  SurvivorSpacePrecleanClosure(CMSCollector* collector,
                               MemRegion span,
                               CMSBitMap* bit_map,
                               CMSMarkStack* mark_stack,
                               PushAndMarkClosure* cl,
                               unsigned int before_count,
                               bool should_yield):
    _collector(collector),
    _span(span),
    _yield(should_yield),
class SurvivorSpacePrecleanClosure: public ObjectClosureCareful {
  CMSCollector*                  _collector;
  MemRegion                      _span;
  bool                           _yield;
  CMSBitMap*                     _bit_map;
  CMSMarkStack*                  _mark_stack;
  PushAndMarkClosure*            _scanning_closure;
  unsigned int                   _before_count;

 public:
  SurvivorSpacePrecleanClosure(CMSCollector* collector,
                               MemRegion span,
                               CMSBitMap* bit_map,
                               CMSMarkStack* mark_stack,
                               PushAndMarkClosure* cl,
                               unsigned int before_count,
                               bool should_yield):
    _collector(collector),
    _span(span),
    _yield(should_yield),
    _bit_map(bit_map),
    _mark_stack(mark_stack),
    _scanning_closure(cl),
    _before_count(before_count)
  { }

  void do_object(oop p) {
    guarantee(false, "call do_object_careful instead");
  }

  size_t do_object_careful(oop p);

  size_t do_object_careful_m(oop p, MemRegion mr) {
    guarantee(false, "Unexpected caller");
    return 0;
  }

 private:
  inline void do_yield_check();
  void do_yield_work();
};
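// Illustrative sketch (hypothetical caller, not part of this file): a
// "careful" object iteration over the two closures above uses the size
// returned by do_object_careful() to advance, and treats a return value
// of 0 as a signal to abort the scan, roughly:
//
//   HeapWord* cur = bottom;
//   while (cur < top) {
//     size_t sz = cl->do_object_careful(oop(cur));
//     if (sz == 0) break;  // closure could not proceed; bail out
//     cur += sz;           // otherwise step over the scanned object
//   }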
// This closure is used to accomplish the sweeping work
// after the second checkpoint but before the concurrent reset
// phase.
//
// Terminology
//   left hand chunk (LHC)  - block of one or more chunks currently being
//     coalesced. The LHC is available for coalescing with a new chunk.
//   right hand chunk (RHC) - block that is currently being swept that is
//     free or garbage that can be coalesced with the LHC.
// _inFreeRange is true if there is currently a LHC.
// _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk.
// _freeRangeInFreeLists is true if the LHC is in the free lists.
// _freeFinger is the address of the current LHC.
class SweepClosure: public BlkClosureCareful {
  CMSCollector*                  _collector;  // collector doing the work
  ConcurrentMarkSweepGeneration* _g;          // generation being swept
  CompactibleFreeListSpace*      _sp;         // space being swept
  HeapWord*                      _limit;
  Mutex*                         _freelistLock;  // free list lock (in space)
  CMSBitMap*                     _bitMap;        // marking bit map (in generation)
  bool                           _inFreeRange;   // are we in the midst of a free run?
  bool                           _freeRangeInFreeLists;
                                 // Often, we have just found a free chunk
                                 // and started a new free range; we do not
                                 // eagerly remove this chunk from the free
                                 // lists unless there is a possibility of
                                 // coalescing. When true, this flag indicates
                                 // that _freeFinger below points to a
                                 // potentially free chunk that may still be
                                 // in the free lists.
  bool                           _lastFreeRangeCoalesced;
                                 // The free range contains coalesced chunks.
  bool                           _yield;
                                 // Whether sweeping should be done with
                                 // yields. For instance, when done by the
                                 // foreground collector we shouldn't yield.
  HeapWord*                      _freeFinger;    // when _inFreeRange is set,
                                                 // the pointer to the "left
                                                 // hand chunk"
  size_t                         _freeRangeSize; // when _inFreeRange is set,
                                                 // the accumulated size of
                                                 // the "left hand chunk"
  NOT_PRODUCT(
    size_t                       _numObjectsFreed;
    size_t                       _numWordsFreed;
    size_t                       _numObjectsLive;
    size_t                       _numWordsLive;
    size_t                       _numObjectsAlreadyFree;
    size_t                       _numWordsAlreadyFree;
    FreeChunk*                   _last_fc;
  )
 private:
  // Code that is common to a free chunk or garbage when
  // encountered during sweeping.
  void doPostIsFreeOrGarbageChunk(FreeChunk* fc, size_t chunkSize);
  // Process a free chunk during sweeping.
  void doAlreadyFreeChunk(FreeChunk* fc);
  // Process a garbage chunk during sweeping.
  size_t doGarbageChunk(FreeChunk* fc);
  // Process a live chunk during sweeping.
  size_t doLiveChunk(FreeChunk* fc);

  // Accessors.
  HeapWord* freeFinger() const            { return _freeFinger; }
  void set_freeFinger(HeapWord* v)        { _freeFinger = v; }
  size_t freeRangeSize() const            { return _freeRangeSize; }
  void set_freeRangeSize(size_t v)        { _freeRangeSize = v; }
  bool inFreeRange() const                { return _inFreeRange; }
  void set_inFreeRange(bool v)            { _inFreeRange = v; }
  bool lastFreeRangeCoalesced() const     { return _lastFreeRangeCoalesced; }
  void set_lastFreeRangeCoalesced(bool v) { _lastFreeRangeCoalesced = v; }
  bool freeRangeInFreeLists() const       { return _freeRangeInFreeLists; }
  void set_freeRangeInFreeLists(bool v)   { _freeRangeInFreeLists = v; }

  // Initialize a free range.
  void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists);
  // Return this chunk to the free lists.
  void flushCurFreeChunk(HeapWord* chunk, size_t size);

  // Check if we should yield and do so when necessary.
  inline void do_yield_check(HeapWord* addr);

  // Yield.
  void do_yield_work(HeapWord* addr);

  // Debugging/Printing
  void record_free_block_coalesced(FreeChunk* fc) const PRODUCT_RETURN;

 public:
  SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
               CMSBitMap* bitMap, bool should_yield);
  ~SweepClosure();

  size_t do_blk_careful(HeapWord* addr);
};

// Closures related to weak reference processing

// During CMS' weak reference processing, this is a
// work-routine/closure used to complete transitive
// marking of objects as live once an initial set has
// been completely accumulated.
// This closure is currently used both during the final
// remark stop-world phase, as well as during the concurrent
// precleaning of the discovered reference lists.
class CMSDrainMarkingStackClosure: public VoidClosure {
  CMSCollector*        _collector;
  MemRegion            _span;
  CMSMarkStack*        _mark_stack;
  CMSBitMap*           _bit_map;
  CMSKeepAliveClosure* _keep_alive;
  bool                 _concurrent_precleaning;
 public:
  CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span,
                              CMSBitMap* bit_map, CMSMarkStack* mark_stack,
                              CMSKeepAliveClosure* keep_alive,
                              bool cpc):
    _collector(collector),
    _span(span),
    _bit_map(bit_map),
    _mark_stack(mark_stack),
    _keep_alive(keep_alive),
    _concurrent_precleaning(cpc) {
    assert(_concurrent_precleaning == _keep_alive->concurrent_precleaning(),
           "Mismatch");
  }

  void do_void();
};

// A parallel version of CMSDrainMarkingStackClosure above.
class CMSParDrainMarkingStackClosure: public VoidClosure {
  CMSCollector*                 _collector;
  MemRegion                     _span;
  OopTaskQueue*                 _work_queue;
  CMSBitMap*                    _bit_map;
  CMSInnerParMarkAndPushClosure _mark_and_push;

 public:
  CMSParDrainMarkingStackClosure(CMSCollector* collector,
                                 MemRegion span, CMSBitMap* bit_map,
                                 CMSMarkStack* revisit_stack,
                                 OopTaskQueue* work_queue):
    _collector(collector),
    _span(span),
    _bit_map(bit_map),
    _work_queue(work_queue),
    _mark_and_push(collector, span, bit_map, revisit_stack, work_queue) { }

  void trim_queue(uint max);
  void do_void();
};
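// Illustrative sketch (assumed shape, not the actual definition, and
// ignoring details such as overflow-list handling): the serial drain
// closure repeatedly pops entries off the marking stack and applies the
// keep-alive closure to each popped object's fields, so that newly
// discovered live objects are pushed back on and processed in turn:
//
//   void CMSDrainMarkingStackClosure::do_void() {
//     while (!_mark_stack->isEmpty()) {
//       oop obj = _mark_stack->pop();
//       obj->oop_iterate(_keep_alive);  // transitively mark referents
//     }
//   }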
// Allow yielding or short-circuiting of reference list
// precleaning work.
class CMSPrecleanRefsYieldClosure: public YieldClosure {
  CMSCollector* _collector;
  void do_yield_work();
 public:
  CMSPrecleanRefsYieldClosure(CMSCollector* collector):
    _collector(collector) {}
  virtual bool should_return();
};


// Convenience class that locks free list locks for given CMS collector
class FreelistLocker: public StackObj {
 private:
  CMSCollector* _collector;
 public:
  FreelistLocker(CMSCollector* collector):
    _collector(collector) {
    _collector->getFreelistLocks();
  }

  ~FreelistLocker() {
    _collector->releaseFreelistLocks();
  }
};

// Mark all dead objects in a given space.
class MarkDeadObjectsClosure: public BlkClosure {
  const CMSCollector*             _collector;
  const CompactibleFreeListSpace* _sp;
  CMSBitMap*                      _live_bit_map;
  CMSBitMap*                      _dead_bit_map;
 public:
  MarkDeadObjectsClosure(const CMSCollector* collector,
                         const CompactibleFreeListSpace* sp,
                         CMSBitMap* live_bit_map,
                         CMSBitMap* dead_bit_map) :
    _collector(collector),
    _sp(sp),
    _live_bit_map(live_bit_map),
    _dead_bit_map(dead_bit_map) {}
  size_t do_blk(HeapWord* addr);
};

class TraceCMSMemoryManagerStats : public TraceMemoryManagerStats {
 public:
  TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase);
  TraceCMSMemoryManagerStats();
};
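// Illustrative sketch (hypothetical call site, not part of this file):
// FreelistLocker and TraceCMSMemoryManagerStats are both stack-allocated
// RAII helpers, so their effect is scoped to the enclosing block; the
// particular CollectorState value shown is only an example:
//
//   {
//     TraceCMSMemoryManagerStats ts(CMSCollector::Sweeping);
//     FreelistLocker fll(collector);  // takes all the free list locks
//     // ... work that requires the free list locks ...
//   }  // locks released and stats recorded when the scope exits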