/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP

#include "gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp"
#include "gc_implementation/shared/gSpaceCounters.hpp"
#include "gc_implementation/shared/gcStats.hpp"
#include "gc_implementation/shared/generationCounters.hpp"
#include "memory/generation.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memoryService.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/taskqueue.hpp"
#include "utilities/yieldingWorkgroup.hpp"

// ConcurrentMarkSweepGeneration is in support of a concurrent
// mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Schenker
// style. We assume, for now, that this generation is always the
// seniormost generation (modulo the PermGeneration), and for simplicity
// in the first implementation, that this generation is a single compactible
// space. Neither of these restrictions appears essential, and they will be
// relaxed in the future when more time is available to implement the
// greater generality (and there's a need for it).
//
// Concurrent mode failures are currently handled by
// means of a sliding mark-compact.

class CMSAdaptiveSizePolicy;
class CMSConcMarkingTask;
class CMSGCAdaptivePolicyCounters;
class ConcurrentMarkSweepGeneration;
class ConcurrentMarkSweepPolicy;
class ConcurrentMarkSweepThread;
class CompactibleFreeListSpace;
class FreeChunk;
class PromotionInfo;
class ScanMarkedObjectsAgainCarefullyClosure;

// A generic CMS bit map. It's the basis for both the CMS marking bit map
// as well as for the mod union table (in each case only a subset of the
// methods are used). This is essentially a wrapper around the BitMap class,
// with one bit per (1 << _shifter) HeapWords (i.e. for the marking bit map
// we have _shifter == 0, and for the mod union table we have
// _shifter == CardTableModRefBS::card_shift - LogHeapWordSize).
// XXX 64-bit issues in BitMap?
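//
// As a hedged illustration (not part of the interface): the conversion
// utilities declared below behave, in effect, like
//
//   size_t    bit  = (addr - _bmStartWord) >> _shifter;  // heapWordToOffset()
//   HeapWord* addr = _bmStartWord + (bit << _shifter);   // offsetToHeapWord()
//
// so with _shifter == 0 each HeapWord has its own bit, while with the mod
// union table's card-sized granularity one bit covers an entire card.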
class CMSBitMap VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;

  HeapWord*    _bmStartWord;   // base address of range covered by map
  size_t       _bmWordSize;    // map size (in #HeapWords covered)
  const int    _shifter;       // shifts to convert HeapWord to bit position
  VirtualSpace _virtual_space; // underlying the bit map
  BitMap       _bm;            // the bit map itself
 public:
  Mutex* const _lock;          // mutex protecting _bm

 public:
  // constructor
  CMSBitMap(int shifter, int mutex_rank, const char* mutex_name);

  // allocates the actual storage for the map
  bool allocate(MemRegion mr);
  // field getter
  Mutex* lock() const { return _lock; }
  // locking verifier convenience function
  void assert_locked() const PRODUCT_RETURN;

  // inquiries
  HeapWord* startWord()   const { return _bmStartWord; }
  size_t    sizeInWords() const { return _bmWordSize; }
  size_t    sizeInBits()  const { return _bm.size(); }
  // the following is one past the last word in space
  HeapWord* endWord()     const { return _bmStartWord + _bmWordSize; }

  // reading marks
  bool isMarked(HeapWord* addr) const;
  bool par_isMarked(HeapWord* addr) const; // does not do locking checks
  bool isUnmarked(HeapWord* addr) const;
  bool isAllClear() const;

  // writing marks
  void mark(HeapWord* addr);
  // For marking by parallel GC threads;
  // returns true if we did, false if another thread did
  bool par_mark(HeapWord* addr);

  void mark_range(MemRegion mr);
  void par_mark_range(MemRegion mr);
  void mark_large_range(MemRegion mr);
  void par_mark_large_range(MemRegion mr);
  void par_clear(HeapWord* addr); // For unmarking by parallel GC threads.
  void clear_range(MemRegion mr);
  void par_clear_range(MemRegion mr);
  void clear_large_range(MemRegion mr);
  void par_clear_large_range(MemRegion mr);
  void clear_all();
  void clear_all_incrementally();  // Not yet implemented!!

  NOT_PRODUCT(
    // checks the memory region for validity
    void region_invariant(MemRegion mr);
  )

  // iteration
  void iterate(BitMapClosure* cl) {
    _bm.iterate(cl);
  }
  void iterate(BitMapClosure* cl, HeapWord* left, HeapWord* right);
  void dirty_range_iterate_clear(MemRegionClosure* cl);
  void dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl);

  // auxiliary support for iteration
  HeapWord* getNextMarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextMarkedWordAddress(HeapWord* start_addr,
                                     HeapWord* end_addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* start_addr,
                                       HeapWord* end_addr) const;
  MemRegion getAndClearMarkedRegion(HeapWord* addr);
  MemRegion getAndClearMarkedRegion(HeapWord* start_addr,
                                    HeapWord* end_addr);

  // conversion utilities
  HeapWord* offsetToHeapWord(size_t offset) const;
  size_t    heapWordToOffset(HeapWord* addr) const;
  size_t    heapWordDiffToOffsetDiff(size_t diff) const;

  // debugging
  // is this address range covered by the bit-map?
  NOT_PRODUCT(
    bool covers(MemRegion mr) const;
    bool covers(HeapWord* start, size_t size = 0) const;
  )
  void verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) PRODUCT_RETURN;
};

// Represents a marking stack used by the CMS collector.
// Ideally this should be a GrowableArray<>, just like MSC's marking stack(s).
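// An illustrative (assumed, simplified) overflow-tolerant push pattern,
// combining push() with expand() and the collector's overflow list:
//
//   if (!mark_stack->push(obj)) {
//     mark_stack->expand();                     // try to grow the stack
//     if (!mark_stack->push(obj)) {
//       collector->push_on_overflow_list(obj);  // fall back on overflow list
//     }
//   }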
class CMSMarkStack: public CHeapObj {
  friend class CMSCollector; // to get at expansion stats further below

  VirtualSpace _virtual_space;  // space for the stack
  oop*   _base;      // bottom of stack
  size_t _index;     // one more than last occupied index
  size_t _capacity;  // max #elements
  Mutex  _par_lock;  // an advisory lock used in case of parallel access
  NOT_PRODUCT(size_t _max_depth;)  // max depth plumbed during run

 protected:
  size_t _hit_limit;      // we hit max stack size limit
  size_t _failed_double;  // we failed expansion before hitting limit

 public:
  CMSMarkStack():
    _par_lock(Mutex::event, "CMSMarkStack._par_lock", true),
    _hit_limit(0),
    _failed_double(0) {}

  bool allocate(size_t size);

  size_t capacity() const { return _capacity; }

  oop pop() {
    if (!isEmpty()) {
      return _base[--_index];
    }
    return NULL;
  }

  bool push(oop ptr) {
    if (isFull()) {
      return false;
    } else {
      _base[_index++] = ptr;
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
      return true;
    }
  }

  bool isEmpty() const { return _index == 0; }
  bool isFull()  const {
    assert(_index <= _capacity, "buffer overflow");
    return _index == _capacity;
  }

  size_t length() { return _index; }

  // "Parallel versions" of some of the above
  oop par_pop() {
    // lock and pop
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return pop();
  }

  bool par_push(oop ptr) {
    // lock and push
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return push(ptr);
  }

  // Forcibly reset the stack, losing all of its contents.
  void reset() {
    _index = 0;
  }

  // Expand the stack, typically in response to an overflow condition.
  void expand();

  // Compute the least valued stack element.
  oop least_value(HeapWord* low) {
    oop least = (oop)low;
    for (size_t i = 0; i < _index; i++) {
      least = MIN2(least, _base[i]);
    }
    return least;
  }

  // Exposed here to allow stack expansion in || case.
  Mutex* par_lock() { return &_par_lock; }
};

class CardTableRS;
class CMSParGCThreadState;

class ModUnionClosure: public MemRegionClosure {
 protected:
  CMSBitMap* _t;
 public:
  ModUnionClosure(CMSBitMap* t): _t(t) { }
  void do_MemRegion(MemRegion mr);
};

class ModUnionClosurePar: public ModUnionClosure {
 public:
  ModUnionClosurePar(CMSBitMap* t): ModUnionClosure(t) { }
  void do_MemRegion(MemRegion mr);
};

// Survivor Chunk Array in support of parallelization of
// Survivor Space rescan.
class ChunkArray: public CHeapObj {
  size_t _index;
  size_t _capacity;
  size_t _overflows;
  HeapWord** _array;  // storage for array

 public:
  ChunkArray() : _index(0), _capacity(0), _overflows(0), _array(NULL) {}
  ChunkArray(HeapWord** a, size_t c):
    _index(0), _capacity(c), _overflows(0), _array(a) {}

  HeapWord** array() { return _array; }
  void set_array(HeapWord** a) { _array = a; }

  size_t capacity() { return _capacity; }
  void set_capacity(size_t c) { _capacity = c; }

  size_t end() {
    assert(_index <= capacity(),
           err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT "): out of bounds",
                   _index, _capacity));
    return _index;
  }  // exclusive

  HeapWord* nth(size_t n) {
    assert(n < end(), "Out of bounds access");
    return _array[n];
  }

  void reset() {
    _index = 0;
    if (_overflows > 0 && PrintCMSStatistics > 1) {
      warning("CMS: ChunkArray[" SIZE_FORMAT "] overflowed " SIZE_FORMAT " times",
              _capacity, _overflows);
    }
    _overflows = 0;
  }

  void record_sample(HeapWord* p, size_t sz) {
    // For now we do not do anything with the size
    if (_index < _capacity) {
      _array[_index++] = p;
    } else {
      ++_overflows;
      assert(_index == _capacity,
             err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT
                     "): out of bounds at overflow#" SIZE_FORMAT,
                     _index, _capacity, _overflows));
    }
  }
};

//
// Timing, allocation and promotion statistics for gc scheduling and incremental
// mode pacing. Most statistics are exponential averages.
//
class CMSStats VALUE_OBJ_CLASS_SPEC {
 private:
  ConcurrentMarkSweepGeneration* const _cms_gen;   // The cms (old) gen.

  // The following are exponential averages with factor alpha
  // (alpha is a percentage, so the weights are normalized by 100):
  //   avg = ((100 - alpha) * avg + alpha * cur_sample) / 100
  //
  // The durations measure:  end_time[n] - start_time[n]
  // The periods measure:    start_time[n] - start_time[n-1]
  //
  // The cms period and duration include only concurrent collections; time spent
  // in foreground cms collections due to System.gc() or because of a failure to
  // keep up are not included.
  //
  // There are 3 alphas to "bootstrap" the statistics.  The _saved_alpha is the
  // real value, but is used only after the first period.  A value of 100 is
  // used for the first sample so it gets the entire weight.
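  //
  // A worked example (illustrative): with alpha == 25, a current average
  // of 100ms and a new sample of 200ms, the updated average is
  //   ((100 - 25) * 100 + 25 * 200) / 100 = 125ms,
  // i.e. the new sample contributes a quarter of its weight and the
  // history keeps the remaining three quarters.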
  unsigned int _saved_alpha; // 0-100
  unsigned int _gc0_alpha;
  unsigned int _cms_alpha;

  double _gc0_duration;
  double _gc0_period;
  size_t _gc0_promoted;           // bytes promoted per gc0
  double _cms_duration;
  double _cms_duration_pre_sweep; // time from initiation to start of sweep
  double _cms_duration_per_mb;
  double _cms_period;
  size_t _cms_allocated;          // bytes of direct allocation per gc0 period

  // Timers.
  elapsedTimer _cms_timer;
  TimeStamp    _gc0_begin_time;
  TimeStamp    _cms_begin_time;
  TimeStamp    _cms_end_time;

  // Snapshots of the amount used in the CMS generation.
  size_t _cms_used_at_gc0_begin;
  size_t _cms_used_at_gc0_end;
  size_t _cms_used_at_cms_begin;

  // Used to prevent the duty cycle from being reduced in the middle of a cms
  // cycle.
  bool _allow_duty_cycle_reduction;

  enum {
    _GC0_VALID = 0x1,
    _CMS_VALID = 0x2,
    _ALL_VALID = _GC0_VALID | _CMS_VALID
  };

  unsigned int _valid_bits;

  unsigned int _icms_duty_cycle;  // icms duty cycle (0-100).

 protected:

  // Return a duty cycle that avoids wild oscillations, by limiting the amount
  // of change between old_duty_cycle and new_duty_cycle (the latter is treated
  // as a recommended value).
  static unsigned int icms_damped_duty_cycle(unsigned int old_duty_cycle,
                                             unsigned int new_duty_cycle);
  unsigned int icms_update_duty_cycle_impl();

  // In support of adjusting of cms trigger ratios based on history
  // of concurrent mode failure.
  double cms_free_adjustment_factor(size_t free) const;
  void   adjust_cms_free_adjustment_factor(bool fail, size_t free);

 public:
  CMSStats(ConcurrentMarkSweepGeneration* cms_gen,
           unsigned int alpha = CMSExpAvgFactor);

  // Whether or not the statistics contain valid data; higher level statistics
  // cannot be called until this returns true (they require at least one young
  // gen and one cms cycle to have completed).
  bool valid() const;

  // Record statistics.
  void record_gc0_begin();
  void record_gc0_end(size_t cms_gen_bytes_used);
  void record_cms_begin();
  void record_cms_end();

  // Allow management of the cms timer, which must be stopped/started around
  // yield points.
  elapsedTimer& cms_timer()     { return _cms_timer; }
  void start_cms_timer()        { _cms_timer.start(); }
  void stop_cms_timer()         { _cms_timer.stop(); }

  // Basic statistics; units are seconds or bytes.
  double gc0_period() const     { return _gc0_period; }
  double gc0_duration() const   { return _gc0_duration; }
  size_t gc0_promoted() const   { return _gc0_promoted; }
  double cms_period() const     { return _cms_period; }
  double cms_duration() const   { return _cms_duration; }
  double cms_duration_per_mb() const { return _cms_duration_per_mb; }
  size_t cms_allocated() const  { return _cms_allocated; }

  size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end; }

  // Seconds since the last background cms cycle began or ended.
  double cms_time_since_begin() const;
  double cms_time_since_end() const;

  // Higher level statistics--caller must check that valid() returns true before
  // calling.

  // Returns bytes promoted per second of wall clock time.
  double promotion_rate() const;

  // Returns bytes directly allocated per second of wall clock time.
  double cms_allocation_rate() const;

  // Rate at which space in the cms generation is being consumed (sum of the
  // above two).
  double cms_consumption_rate() const;

  // Returns an estimate of the number of seconds until the cms generation will
  // fill up, assuming no collection work is done.
  double time_until_cms_gen_full() const;

  // Returns an estimate of the number of seconds remaining until
  // the cms generation collection should start.
  double time_until_cms_start() const;

  // End of higher level statistics.

  // Returns the cms incremental mode duty cycle, as a percentage (0-100).
  unsigned int icms_duty_cycle() const { return _icms_duty_cycle; }

  // Update the duty cycle and return the new value.
  unsigned int icms_update_duty_cycle();
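
  // Illustrative relationships among the estimates above (assumed, not
  // normative): with P = promotion_rate(), A = cms_allocation_rate() and
  // F = free bytes currently in the cms generation,
  //   cms_consumption_rate()    ~= P + A
  //   time_until_cms_gen_full() ~= F / (P + A)
  // and time_until_cms_start() further subtracts an allowance for the
  // expected duration of the cycle itself.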
  // Debugging.
  void print_on(outputStream* st) const PRODUCT_RETURN;
  void print() const { print_on(gclog_or_tty); }
};

// A closure related to weak references processing which
// we embed in the CMSCollector, since we need to pass
// it to the reference processor for secondary filtering
// of references based on reachability of referent;
// see role of _is_alive_non_header closure in the
// ReferenceProcessor class.
// For objects in the CMS generation, this closure checks
// if the object is "live" (reachable). Used in weak
// reference processing.
class CMSIsAliveClosure: public BoolObjectClosure {
  const MemRegion  _span;
  const CMSBitMap* _bit_map;

  friend class CMSCollector;
 public:
  CMSIsAliveClosure(MemRegion span,
                    CMSBitMap* bit_map):
    _span(span),
    _bit_map(bit_map) {
    assert(!span.is_empty(), "Empty span could spell trouble");
  }

  void do_object(oop obj) {
    assert(false, "not to be invoked");
  }

  bool do_object_b(oop obj);
};


// Implements AbstractRefProcTaskExecutor for CMS.
class CMSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
 public:

  CMSRefProcTaskExecutor(CMSCollector& collector)
    : _collector(collector)
  { }

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
 private:
  CMSCollector& _collector;
};


class CMSCollector: public CHeapObj {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweepGeneration;
  friend class CompactibleFreeListSpace;
  friend class CMSParRemarkTask;
  friend class CMSConcMarkingTask;
  friend class CMSRefProcTaskProxy;
  friend class CMSRefProcTaskExecutor;
  friend class ScanMarkedObjectsAgainCarefullyClosure;  // for sampling eden
  friend class SurvivorSpacePrecleanClosure;            // --- ditto -------
  friend class PushOrMarkClosure;             // to access _restart_addr
  friend class Par_PushOrMarkClosure;         // to access _restart_addr
  friend class MarkFromRootsClosure;          //  -- ditto --
                                              // ... and for clearing cards
  friend class Par_MarkFromRootsClosure;      // to access _restart_addr
                                              // ... and for clearing cards
  friend class Par_ConcMarkingClosure;        // to access _restart_addr etc.
  friend class MarkFromRootsVerifyClosure;    // to access _restart_addr
  friend class PushAndMarkVerifyClosure;      //  -- ditto --
  friend class MarkRefsIntoAndScanClosure;    // to access _overflow_list
  friend class PushAndMarkClosure;            //  -- ditto --
  friend class Par_PushAndMarkClosure;        //  -- ditto --
  friend class CMSKeepAliveClosure;           //  -- ditto --
  friend class CMSDrainMarkingStackClosure;   //  -- ditto --
  friend class CMSInnerParMarkAndPushClosure; //  -- ditto --
  NOT_PRODUCT(friend class ScanMarkedObjectsAgainClosure;) // assertion on _overflow_list
  friend class ReleaseForegroundGC;  // to access _foregroundGCShouldWait
  friend class VM_CMS_Operation;
  friend class VM_CMS_Initial_Mark;
  friend class VM_CMS_Final_Remark;
  friend class TraceCMSMemoryManagerStats;

 private:
  jlong _time_of_last_gc;
  void update_time_of_last_gc(jlong now) {
    _time_of_last_gc = now;
  }

  OopTaskQueueSet* _task_queues;
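  // An illustrative (assumed, simplified) sketch of the lock-free push onto
  // the overflow list below; the real par_* code additionally deals with
  // displaced mark words and a BUSY placeholder:
  //
  //   oop cur;
  //   do {
  //     cur = _overflow_list;
  //     p->set_mark(markOop(cur));  // thread the list through the mark word
  //   } while (Atomic::cmpxchg_ptr(p, &_overflow_list, cur) != cur);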
  // Overflow list of grey objects, threaded through mark-word.
  // Manipulated with CAS in the parallel/multi-threaded case.
  oop _overflow_list;
  // The following array-pair keeps track of mark words
  // displaced for accommodating overflow list above.
  // This code will likely be revisited under RFE#4922830.
  Stack<oop>     _preserved_oop_stack;
  Stack<markOop> _preserved_mark_stack;

  int* _hash_seed;

  // In support of multi-threaded concurrent phases
  YieldingFlexibleWorkGang* _conc_workers;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Initialization Errors
  bool _completed_initialization;

  // In support of ExplicitGCInvokesConcurrent
  static bool _full_gc_requested;
  unsigned int _collection_count_start;

  // Should we unload classes this concurrent cycle?
  bool _should_unload_classes;
  unsigned int _concurrent_cycles_since_last_unload;
  unsigned int concurrent_cycles_since_last_unload() const {
    return _concurrent_cycles_since_last_unload;
  }
  // Did we (allow) unload classes in the previous concurrent cycle?
  bool unloaded_classes_last_cycle() const {
    return concurrent_cycles_since_last_unload() == 0;
  }
  // Root scanning options for perm gen
  int _roots_scanning_options;
  int roots_scanning_options() const      { return _roots_scanning_options; }
  void add_root_scanning_option(int o)    { _roots_scanning_options |= o;   }
  void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o;  }

  // Verification support
  CMSBitMap _verification_mark_bm;
  void verify_after_remark_work_1();
  void verify_after_remark_work_2();

  // true if any verification flag is on.
  bool _verifying;
  bool verifying() const { return _verifying; }
  void set_verifying(bool v) { _verifying = v; }

  // Collector policy
  ConcurrentMarkSweepPolicy* _collector_policy;
  ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }

  // XXX Move these to CMSStats ??? FIX ME !!!
  elapsedTimer _inter_sweep_timer;   // time between sweeps
  elapsedTimer _intra_sweep_timer;   // time _in_ sweeps
  // padded decaying average estimates of the above
  AdaptivePaddedAverage _inter_sweep_estimate;
  AdaptivePaddedAverage _intra_sweep_estimate;

 protected:
  ConcurrentMarkSweepGeneration* _cmsGen;  // old gen (CMS)
  ConcurrentMarkSweepGeneration* _permGen; // perm gen
  MemRegion                      _span;    // span covering above two
  CardTableRS*                   _ct;      // card table

  // CMS marking support structures
  CMSBitMap     _markBitMap;
  CMSBitMap     _modUnionTable;
  CMSMarkStack  _markStack;
  CMSMarkStack  _revisitStack;  // used to keep track of klassKlass objects
                                // to revisit
  CMSBitMap     _perm_gen_verify_bit_map; // Mark bit map for perm gen verification support.

  HeapWord*     _restart_addr;  // in support of marking stack overflow
  void          lower_restart_addr(HeapWord* low);

  // Counters in support of marking stack / work queue overflow handling:
  // a non-zero value indicates certain types of overflow events during
  // the current CMS cycle and could lead to stack resizing efforts at
  // an opportune future time.
  size_t _ser_pmc_preclean_ovflw;
  size_t _ser_pmc_remark_ovflw;
  size_t _par_pmc_remark_ovflw;
  size_t _ser_kac_preclean_ovflw;
  size_t _ser_kac_ovflw;
  size_t _par_kac_ovflw;
  NOT_PRODUCT(ssize_t _num_par_pushes;)

  // ("Weak") Reference processing support
  ReferenceProcessor* _ref_processor;
  CMSIsAliveClosure   _is_alive_closure;
  // keep this textually after _markBitMap and _span; c'tor dependency

  ConcurrentMarkSweepThread* _cmsThread;  // the thread doing the work
  ModUnionClosure    _modUnionClosure;
  ModUnionClosurePar _modUnionClosurePar;

  // CMS abstract state machine
  // initial_state: Idling
  // next_state(Idling) = {Marking}
  // next_state(Marking) = {Precleaning, Sweeping}
  // next_state(Precleaning) = {AbortablePreclean, FinalMarking}
  // next_state(AbortablePreclean) = {FinalMarking}
  // next_state(FinalMarking) = {Sweeping}
  // next_state(Sweeping) = {Resizing}
  // next_state(Resizing) = {Resetting}
  // next_state(Resetting) = {Idling}
  // The numeric values below are chosen so that:
  // . _collectorState <= Idling == post-sweep && pre-mark
  // . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
  //                                            precleaning || abortablePreclean
 public:
  enum CollectorState {
    Resizing          = 0,
    Resetting         = 1,
    Idling            = 2,
    InitialMarking    = 3,
    Marking           = 4,
    Precleaning       = 5,
    AbortablePreclean = 6,
    FinalMarking      = 7,
    Sweeping          = 8
  };
 protected:
  static CollectorState _collectorState;

  // State related to prologue/epilogue invocation for my generations
  bool _between_prologue_and_epilogue;

  // Signalling/State related to coordination between fore- and background GC
  // Note: When the baton has been passed from background GC to foreground GC,
  // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
  static bool _foregroundGCIsActive;    // true iff foreground collector is active or
                                        // wants to go active
  static bool _foregroundGCShouldWait;  // true iff background GC is active and has not
                                        // yet passed the baton to the foreground GC

  // Support for CMSScheduleRemark (abortable preclean)
  bool _abort_preclean;
  bool _start_sampling;

  int    _numYields;
  size_t _numDirtyCards;
  size_t _sweep_count;
  // number of full gc's since the last concurrent gc.
  uint   _full_gcs_since_conc_gc;

  // occupancy used for bootstrapping stats
  double _bootstrap_occupancy;

  // timer
  elapsedTimer _timer;

  // Timing, allocation and promotion statistics, used for scheduling.
  CMSStats _stats;

  // Allocation limits installed in the young gen, used only in
  // CMSIncrementalMode. When an allocation in the young gen would cross one of
  // these limits, the cms generation is notified and the cms thread is started
  // or stopped, respectively.
  HeapWord* _icms_start_limit;
  HeapWord* _icms_stop_limit;

  enum CMS_op_type {
    CMS_op_checkpointRootsInitial,
    CMS_op_checkpointRootsFinal
  };

  void do_CMS_operation(CMS_op_type op);
  bool stop_world_and_do(CMS_op_type op);

  OopTaskQueueSet* task_queues()    { return _task_queues; }
  int*             hash_seed(int i) { return &_hash_seed[i]; }
  YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; }

  // Support for parallelizing Eden rescan in CMS remark phase
  void sample_eden(); // ... sample Eden space top
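  // Illustrative use of the sampling state below (an assumed sketch, not
  // the exact code): sample_eden() periodically records *_top_addr into
  // _eden_chunk_array, so that at remark time [eden bottom, eden top) can
  // be split at the recorded boundaries into roughly equal-sized chunks,
  // one per parallel GC worker:
  //
  //   if (_eden_chunk_index < _eden_chunk_capacity) {
  //     _eden_chunk_array[_eden_chunk_index++] = *_top_addr;
  //   }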

 private:
  // Support for parallelizing young gen rescan in CMS remark phase
  Generation* _young_gen;       // the younger gen
  HeapWord**  _top_addr;        // ... Top of Eden
  HeapWord**  _end_addr;        // ... End of Eden
  HeapWord**  _eden_chunk_array;    // ... Eden partitioning array
  size_t      _eden_chunk_index;    // ... top (exclusive) of array
  size_t      _eden_chunk_capacity; // ... max entries in array

  // Support for parallelizing survivor space rescan
  HeapWord** _survivor_chunk_array;
  size_t     _survivor_chunk_index;
  size_t     _survivor_chunk_capacity;
  size_t*    _cursor;
  ChunkArray* _survivor_plab_array;

  // Support for marking stack overflow handling
  bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
  bool par_take_from_overflow_list(size_t num,
                                   OopTaskQueue* to_work_q,
                                   int no_of_gc_threads);
  void push_on_overflow_list(oop p);
  void par_push_on_overflow_list(oop p);
  // the following is, obviously, not, in general, "MT-stable"
  bool overflow_list_is_empty() const;

  void preserve_mark_if_necessary(oop p);
  void par_preserve_mark_if_necessary(oop p);
  void preserve_mark_work(oop p, markOop m);
  void restore_preserved_marks_if_any();
  NOT_PRODUCT(bool no_preserved_marks() const;)
  // in support of testing overflow code
  NOT_PRODUCT(int _overflow_counter;)
  NOT_PRODUCT(bool simulate_overflow();)      // sequential
  NOT_PRODUCT(bool par_simulate_overflow();)  // MT version

  // CMS work methods
  void checkpointRootsInitialWork(bool asynch); // initial checkpoint work

  // a return value of false indicates failure due to stack overflow
  bool markFromRootsWork(bool asynch);  // concurrent marking work

 public:   // FIX ME!!! only for testing
  bool do_marking_st(bool asynch);      // single-threaded marking
  bool do_marking_mt(bool asynch);      // multi-threaded marking

 private:

  // concurrent precleaning work
  size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
                                  ScanMarkedObjectsAgainCarefullyClosure* cl);
  size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
                             ScanMarkedObjectsAgainCarefullyClosure* cl);
  // Does precleaning work, returning a quantity indicative of
  // the amount of "useful work" done.
  size_t preclean_work(bool clean_refs, bool clean_survivors);
  void abortable_preclean(); // Preclean while looking for possible abort
  void initialize_sequential_subtasks_for_young_gen_rescan(int i);
  // Helper function for above; merge-sorts the per-thread plab samples
  void merge_survivor_plab_arrays(ContiguousSpace* surv, int no_of_gc_threads);
  // Resets (i.e. clears) the per-thread plab sample vectors
  void reset_survivor_plab_arrays();

  // final (second) checkpoint work
  void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
                                bool init_mark_was_synchronous);
  // work routine for parallel version of remark
  void do_remark_parallel();
  // work routine for non-parallel version of remark
  void do_remark_non_parallel();
  // reference processing work routine (during second checkpoint)
  void refProcessingWork(bool asynch, bool clear_all_soft_refs);

  // concurrent sweeping work
  void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);

  // (concurrent) resetting of support data structures
  void reset(bool asynch);

  // Clear _expansion_cause fields of constituent generations
  void clear_expansion_cause();

  // An auxiliary method used to record the ends of
  // used regions of each generation to limit the extent of sweep
  void save_sweep_limits();

  // Resize the generations included in the collector.
  void compute_new_size();

  // A work method used by foreground collection to determine
  // what type of collection (compacting or not, continuing or fresh)
  // it should do.
  void decide_foreground_collection_type(bool clear_all_soft_refs,
                                         bool* should_compact, bool* should_start_over);

  // A work method used by the foreground collector to do
  // a mark-sweep-compact.
  void do_compaction_work(bool clear_all_soft_refs);

  // A work method used by the foreground collector to do
  // a mark-sweep, after taking over from a possibly on-going
  // concurrent mark-sweep collection.
  void do_mark_sweep_work(bool clear_all_soft_refs,
                          CollectorState first_state, bool should_start_over);

  // If the background GC is active, acquire control from the background
  // GC and do the collection.
  void acquire_control_and_collect(bool full, bool clear_all_soft_refs);

  // For synchronizing passing of control from background to foreground
  // GC. waitForForegroundGC() is called by the background
  // collector. If it had to wait for a foreground collection,
  // it returns true and the background collection should assume
  // that the collection was finished by the foreground
  // collector.
  bool waitForForegroundGC();

  // Incremental mode triggering: recompute the icms duty cycle and set the
  // allocation limits in the young gen.
  void icms_update_allocation_limits();

  size_t block_size_using_printezis_bits(HeapWord* addr) const;
  size_t block_size_if_printezis_bits(HeapWord* addr) const;
  HeapWord* next_card_start_after_block(HeapWord* addr) const;

  void setup_cms_unloading_and_verification_state();
 public:
  CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
               ConcurrentMarkSweepGeneration* permGen,
               CardTableRS*                   ct,
               ConcurrentMarkSweepPolicy*     cp);
  ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }

  ReferenceProcessor* ref_processor() { return _ref_processor; }
  void ref_processor_init();

  Mutex* bitMapLock() const { return _markBitMap.lock(); }
  static CollectorState abstract_state() { return _collectorState; }

  bool should_abort_preclean() const; // Whether preclean should be aborted.
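  // An illustrative, simplified shape of abortable_preclean() (assumed from
  // the declarations above; the real code also applies time and loop
  // bounds):
  //
  //   while (!should_abort_preclean()) {
  //     size_t work_done = preclean_work(CMSPrecleanRefLists2,
  //                                      CMSPrecleanSurvivors2);
  //     if (work_done < CMSAbortablePrecleanMinWorkPerIteration) break;
  //   }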
  size_t get_eden_used() const;
  size_t get_eden_capacity() const;

  ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }

  // locking checks
  NOT_PRODUCT(static bool have_cms_token();)

  // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
  bool shouldConcurrentCollect();

  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);
  void collect_in_background(bool clear_all_soft_refs);
  void collect_in_foreground(bool clear_all_soft_refs);

  // In support of ExplicitGCInvokesConcurrent
  static void request_full_gc(unsigned int full_gc_count);
  // Should we unload classes in a particular concurrent cycle?
  bool should_unload_classes() const {
    return _should_unload_classes;
  }
  bool update_should_unload_classes();

  void direct_allocated(HeapWord* start, size_t size);

  // Object is dead if not marked and current phase is sweeping.
  bool is_dead_obj(oop obj) const;

  // After a promotion (of "start"), do any necessary marking.
  // If "par", then it's being done by a parallel GC thread.
  // The last two args indicate if we need precise marking
  // and if so the size of the object so it can be dirtied
  // in its entirety.
  void promoted(bool par, HeapWord* start,
                bool is_obj_array, size_t obj_size);

  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                     size_t word_size);

  void getFreelistLocks() const;
  void releaseFreelistLocks() const;
  bool haveFreelistLocks() const;

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_epilogue(bool full);

  jlong time_of_last_gc(jlong now) {
    if (_collectorState <= Idling) {
      // gc not in progress
      return _time_of_last_gc;
    } else {
      // collection in progress
      return now;
    }
  }

  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num);

  CMSBitMap* markBitMap() { return &_markBitMap; }
  void directAllocated(HeapWord* start, size_t size);

  // main CMS steps and related support
  void checkpointRootsInitial(bool asynch);
  bool markFromRoots(bool asynch); // a return value of false indicates failure
                                   // due to stack overflow
  void preclean();
  void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs,
                            bool init_mark_was_synchronous);
  void sweep(bool asynch);
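  // An illustrative (assumed, simplified) background-cycle skeleton built
  // from the steps above, mirroring the state machine documented earlier;
  // the real collect_in_background() interleaves state changes, yields and
  // baton passing with the foreground collector:
  //
  //   checkpointRootsInitial(true);       // stop-world initial mark
  //   if (markFromRoots(true)) {          // concurrent mark (may overflow)
  //     preclean();                       // concurrent (abortable) preclean
  //     checkpointRootsFinal(true, clear_all_soft_refs, false); // remark
  //     sweep(true);                      // concurrent sweep
  //   }
  //   reset(true);                        // concurrent reset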

  // Check that the currently executing thread is the expected
  // one (foreground collector or background collector).
  static void check_correct_thread_executing() PRODUCT_RETURN;
  // XXXPERM void print_statistics() PRODUCT_RETURN;

  bool is_cms_reachable(HeapWord* addr);

  // Performance Counter Support
  CollectorCounters* counters() { return _gc_counters; }

  // timer stuff
  void   startTimer() { assert(!_timer.is_active(), "Error"); _timer.start();   }
  void   stopTimer()  { assert( _timer.is_active(), "Error"); _timer.stop();    }
  void   resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset();   }
  double timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); }

  int  yields()          { return _numYields; }
  void resetYields()     { _numYields = 0;    }
  void incrementYields() { _numYields++;      }
  void resetNumDirtyCards()               { _numDirtyCards = 0; }
  void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; }
  size_t numDirtyCards()                  { return _numDirtyCards; }

  static bool foregroundGCShouldWait()           { return _foregroundGCShouldWait; }
  static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; }
  static bool foregroundGCIsActive()             { return _foregroundGCIsActive; }
  static void set_foregroundGCIsActive(bool v)   { _foregroundGCIsActive = v; }
  size_t sweep_count() const     { return _sweep_count; }
  void   increment_sweep_count() { _sweep_count++; }

  // Timers/stats for gc scheduling and incremental mode pacing.
  CMSStats& stats() { return _stats; }

  // Convenience methods that check whether CMSIncrementalMode is enabled and
  // forward to the corresponding methods in ConcurrentMarkSweepThread.
  static void start_icms();
  static void stop_icms();    // Called at the end of the cms cycle.
  static void disable_icms(); // Called before a foreground collection.
  static void enable_icms();  // Called after a foreground collection.
  void icms_wait();           // Called at yield points.

  // Adaptive size policy
  CMSAdaptiveSizePolicy* size_policy();
  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();

  // debugging
  void verify(bool);
  bool verify_after_remark();
  void verify_ok_to_terminate() const PRODUCT_RETURN;
  void verify_work_stacks_empty() const PRODUCT_RETURN;
  void verify_overflow_empty() const PRODUCT_RETURN;

  // convenience methods in support of debugging
  static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
  HeapWord* block_start(const void* p) const PRODUCT_RETURN0;

  // accessors
  CMSMarkStack* verification_mark_stack() { return &_markStack; }
  CMSBitMap*    verification_mark_bm()    { return &_verification_mark_bm; }

  // Get the bit map holding perm gen "deadness" information.
  CMSBitMap* perm_gen_verify_bit_map() { return &_perm_gen_verify_bit_map; }

  // Initialization errors
  bool completed_initialization() { return _completed_initialization; }
};

class CMSExpansionCause : public AllStatic {
 public:
  enum Cause {
    _no_expansion,
    _satisfy_free_ratio,
    _satisfy_promotion,
    _satisfy_allocation,
    _allocate_par_lab,
    _allocate_par_spooling_space,
    _adaptive_size_policy
  };
  // Return a string describing the cause of the expansion.
  static const char* to_string(CMSExpansionCause::Cause cause);
};

class ConcurrentMarkSweepGeneration: public CardGeneration {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweep;
  friend class CMSCollector;
 protected:
  static CMSCollector*      _collector; // the collector that collects us
  CompactibleFreeListSpace* _cmsSpace;  // underlying space (only one for now)

  // Performance Counters
  GenerationCounters* _gen_counters;
  GSpaceCounters*     _space_counters;

  // Words directly allocated, used by CMSStats.
  size_t _direct_allocated_words;

  // Non-product stat counters
  NOT_PRODUCT(
    size_t _numObjectsPromoted;
    size_t _numWordsPromoted;
    size_t _numObjectsAllocated;
    size_t _numWordsAllocated;
  )

  // Used for sizing decisions
  bool _incremental_collection_failed;
  bool incremental_collection_failed() {
    return _incremental_collection_failed;
  }
  void set_incremental_collection_failed() {
    _incremental_collection_failed = true;
  }
  void clear_incremental_collection_failed() {
    _incremental_collection_failed = false;
  }

  // accessors
  void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v; }
  CMSExpansionCause::Cause expansion_cause() const     { return _expansion_cause; }

 private:
  // For parallel young-gen GC support.
  CMSParGCThreadState** _par_gc_thread_states;

  // Reason generation was expanded
  CMSExpansionCause::Cause _expansion_cause;

  // In support of MinChunkSize being larger than min object size
  const double _dilatation_factor;

  enum CollectionTypes {
    Concurrent_collection_type     = 0,
    MS_foreground_collection_type  = 1,
    MSC_foreground_collection_type = 2,
    Unknown_collection_type        = 3
  };

  CollectionTypes _debug_collection_type;

  // Fraction of current occupancy at which to start a CMS collection which
  // will collect this generation (at least).
  double _initiating_occupancy;

 protected:
  // Shrink generation by specified size
  virtual void shrink_by(size_t bytes);

  // Update statistics for GC
  virtual void update_gc_stats(int level, bool full);

  // Maximum available space in the generation (including uncommitted)
  // space.
  size_t max_available() const;
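  // An illustrative (assumed) reading of the trigger below: a concurrent
  // collection of this generation becomes warranted roughly once
  //   occupancy() >= initiating_occupancy()
  // where initiating_occupancy() is derived from the
  // CMSInitiatingOccupancyFraction (io) and CMSTriggerRatio (tr)
  // command-line values passed to init_initiating_occupancy().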
  // getter and initializer for _initiating_occupancy field.
  double initiating_occupancy() const { return _initiating_occupancy; }
  void   init_initiating_occupancy(intx io, intx tr);

 public:
  ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                int level, CardTableRS* ct,
                                bool use_adaptive_freelists,
                                FreeBlockDictionary::DictionaryChoice);

  // Accessors
  CMSCollector* collector() const { return _collector; }
  static void set_collector(CMSCollector* collector) {
    assert(_collector == NULL, "already set");
    _collector = collector;
  }
  CompactibleFreeListSpace* cmsSpace() const { return _cmsSpace; }

  Mutex* freelistLock() const;

  virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }

  // Adaptive size policy
  CMSAdaptiveSizePolicy* size_policy();

  bool refs_discovery_is_atomic() const { return false; }
  bool refs_discovery_is_mt()     const {
    // Note: CMS does MT-discovery during the parallel-remark
    // phases. Use ReferenceProcessorMTMutator to make refs
    // discovery MT-safe during such phases or other parallel
    // discovery phases in the future. This may all go away
    // if/when we decide that refs discovery is sufficiently
    // rare that the cost of the CAS's involved is in the
    // noise. That's a measurement that should be done, and
    // the code simplified if that turns out to be the case.
    return false;
  }

  // Override
  virtual void ref_processor_init();

  // Grow generation by specified size (returns false if unable to grow)
  bool grow_by(size_t bytes);
  // Grow generation to reserved size.
  bool grow_to_reserved();

  void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;
  double occupancy() const { return ((double)used())/((double)capacity()); }
  size_t contiguous_available() const;
  size_t unsafe_max_alloc_nogc() const;

  // over-rides
  MemRegion used_region() const;
  MemRegion used_region_at_save_marks() const;

  // Does a "full" (forced) collection invoked on this generation collect
  // all younger generations as well? Note that the second conjunct is a
  // hack to allow the collection of the younger gen first if the flag is
  // set. This is better than using the policy's should_collect_gen0_first(),
  // since that causes us to do an extra unnecessary pair of restart-&-stop-world.
  virtual bool full_collects_younger_generations() const {
    return UseCMSCompactAtFullCollection && !CollectGen0First;
  }

  void space_iterate(SpaceClosure* blk, bool usedOnly = false);

  // Support for compaction
  CompactibleSpace* first_compaction_space() const;
  // Adjust quantities in the generation affected by
  // the compaction.
  void reset_after_compaction();

  // Allocation support
  HeapWord* allocate(size_t size, bool tlab);
  HeapWord* have_lock_and_allocate(size_t size, bool tlab);
  oop       promote(oop obj, size_t obj_size);
  HeapWord* par_allocate(size_t size, bool tlab) {
    return allocate(size, tlab);
  }

  // Incremental mode triggering.
  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                     size_t word_size);

  // Used by CMSStats to track direct allocation. The value is sampled and
  // reset after each young gen collection.
  size_t direct_allocated_words() const { return _direct_allocated_words; }
  void reset_direct_allocated_words()   { _direct_allocated_words = 0; }

  // Overrides for parallel promotion.
  virtual oop par_promote(int thread_num,
                          oop obj, markOop m, size_t word_sz);
  // This one should not be called for CMS.
  virtual void par_promote_alloc_undo(int thread_num,
                                      HeapWord* obj, size_t word_sz);
  virtual void par_promote_alloc_done(int thread_num);
  virtual void par_oop_since_save_marks_iterate_done(int thread_num);

  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
    bool younger_handles_promotion_failure) const;

  // Inform this (non-young) generation that a promotion failure was
  // encountered during a collection of a younger generation that
  // promotes into this generation.
  virtual void promotion_failure_occurred();

  bool should_collect(bool full, size_t size, bool tlab);
  virtual bool should_concurrent_collect() const;
  virtual bool is_too_full() const;
  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);

  HeapWord* expand_and_allocate(size_t word_size,
                                bool tlab,
                                bool parallel = false);

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_prologue_work(bool full, bool registerClosure,
                        ModUnionClosure* modUnionClosure);
  void gc_epilogue(bool full);
  void gc_epilogue_work(bool full);

  // Time since last GC of this generation
  jlong time_of_last_gc(jlong now) {
    return collector()->time_of_last_gc(now);
  }
  void update_time_of_last_gc(jlong now) {
    collector()->update_time_of_last_gc(now);
  }

  // Allocation failure
  void expand(size_t bytes, size_t expand_bytes,
              CMSExpansionCause::Cause cause);
  virtual bool expand(size_t bytes, size_t expand_bytes);
  void shrink(size_t bytes);
  HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
  bool expand_and_ensure_spooling_space(PromotionInfo* promo);

  // Iteration support and related enquiries
  void save_marks();
  bool no_allocs_since_save_marks();
  void object_iterate_since_last_GC(ObjectClosure* cl);
  void younger_refs_iterate(OopsInGenClosure* cl);

  // Iteration support specific to CMS generations
  void save_sweep_limit();

  // More iteration support
  virtual void oop_iterate(MemRegion mr, OopClosure* cl);
  virtual void oop_iterate(OopClosure* cl);
  virtual void safe_object_iterate(ObjectClosure* cl);
  virtual void object_iterate(ObjectClosure* cl);

  // Need to declare the full complement of closures, whether we'll
  // override them or not, or get message from the compiler:
  // oop_since_save_marks_iterate_nv hides virtual function...
  #define CMS_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
    void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DECL)

  // Smart allocation  XXX -- move to CFLSpace?
  void setNearLargestChunk();
  bool isNearLargestChunk(HeapWord* addr);

  // Get the chunk at the end of the space. Delegates to
  // the space.
  FreeChunk* find_chunk_at_end();

  // Overriding of unused functionality (sharing not yet supported with CMS)
  void pre_adjust_pointers();
  void post_compact();

  // Debugging
  void prepare_for_verify();
  void verify(bool allow_dirty);
  void print_statistics() PRODUCT_RETURN;

  // Performance Counters support
  virtual void update_counters();
  virtual void update_counters(size_t used);
  void initialize_performance_counters();
  CollectorCounters* counters() { return collector()->counters(); }

  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num) {
    // delegate to collector
    return collector()->get_data_recorder(thr_num);
  }

  // Printing
  const char* name() const;
  virtual const char* short_name() const { return "CMS"; }
  void print() const;
  void printOccupancy(const char* s);
  bool must_be_youngest() const { return false; }
  bool must_be_oldest()   const { return true; }

  void compute_new_size();

  CollectionTypes debug_collection_type() { return _debug_collection_type; }
  void rotate_debug_collection_type();
};

class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration {

  // Return the size policy from the heap's collector
  // policy cast to CMSAdaptiveSizePolicy*.
  CMSAdaptiveSizePolicy* cms_size_policy() const;

  // Resize the generation based on the adaptive size
  // policy.
  void resize(size_t cur_promo, size_t desired_promo);

  // Return the GC counters from the collector policy
  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();

  virtual void shrink_by(size_t bytes);

 public:
  virtual void compute_new_size();
  ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                  int level, CardTableRS* ct,
                                  bool use_adaptive_freelists,
                                  FreeBlockDictionary::DictionaryChoice
                                    dictionaryChoice) :
    ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct,
                                  use_adaptive_freelists, dictionaryChoice) {}

  virtual const char* short_name() const { return "ASCMS"; }
  virtual Generation::Name kind() { return Generation::ASConcurrentMarkSweep; }

  virtual void update_counters();
  virtual void update_counters(size_t used);
};

//
// Closures of various sorts used by CMS to accomplish its work
//

// This closure is used to check that a certain set of oops is empty.
class FalseClosure: public OopClosure {
 public:
  void do_oop(oop* p)       { guarantee(false, "Should be an empty set"); }
  void do_oop(narrowOop* p) { guarantee(false, "Should be an empty set"); }
};

// This closure is used to do concurrent marking from the roots
// following the first checkpoint.
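// An illustrative note on the finger-based discipline suggested by the
// fields below (assumed, not normative): _finger tracks the closure's
// sweep over the marking bit map. Objects discovered at addresses the
// finger has already passed must be pushed on the mark stack (or work
// queue) so they are not missed, while objects ahead of the finger will
// be visited when the finger reaches their marked bits; _threshold
// governs when card-clearing work is done along the way.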
class MarkFromRootsClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _bitMap;
  CMSBitMap*     _mut;
  CMSMarkStack*  _markStack;
  CMSMarkStack*  _revisitStack;
  bool           _yield;
  int            _skipBits;
  HeapWord*      _finger;
  HeapWord*      _threshold;
  DEBUG_ONLY(bool _verifying;)

 public:
  MarkFromRootsClosure(CMSCollector* collector, MemRegion span,
                       CMSBitMap* bitMap,
                       CMSMarkStack* markStack,
                       CMSMarkStack* revisitStack,
                       bool should_yield, bool verifying = false);
  bool do_bit(size_t offset);
  void reset(HeapWord* addr);
  inline void do_yield_check();

 private:
  void scanOopsInOop(HeapWord* ptr);
  void do_yield_work();
};

// This closure is used to do concurrent multi-threaded
// marking from the roots following the first checkpoint.
// XXX This should really be a subclass of the serial version
// above, but I have not had the time to refactor things cleanly.
// That will be done for Dolphin.
class Par_MarkFromRootsClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _whole_span;
  MemRegion      _span;
  CMSBitMap*     _bit_map;
  CMSBitMap*     _mut;
  OopTaskQueue*  _work_queue;
  CMSMarkStack*  _overflow_stack;
  CMSMarkStack*  _revisit_stack;
  bool           _yield;
  int            _skip_bits;
  HeapWord*      _finger;
  HeapWord*      _threshold;
  CMSConcMarkingTask* _task;
 public:
  Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
                           MemRegion span,
                           CMSBitMap* bit_map,
                           OopTaskQueue* work_queue,
                           CMSMarkStack* overflow_stack,
                           CMSMarkStack* revisit_stack,
                           bool should_yield);
  bool do_bit(size_t offset);
  inline void do_yield_check();

 private:
  void scan_oops_in_oop(HeapWord* ptr);
  void do_yield_work();
  bool get_work_from_overflow_stack();
};

// The following closures are used to do certain kinds of verification of
// CMS marking.
class PushAndMarkVerifyClosure: public OopClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _verification_bm;
  CMSBitMap*     _cms_bm;
  CMSMarkStack*  _mark_stack;
 protected:
  void do_oop(oop p);
  template <class T> inline void do_oop_work(T *p) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    do_oop(obj);
  }
 public:
  PushAndMarkVerifyClosure(CMSCollector* cms_collector,
                           MemRegion span,
                           CMSBitMap* verification_bm,
                           CMSBitMap* cms_bm,
                           CMSMarkStack* mark_stack);
  void do_oop(oop* p);
  void do_oop(narrowOop* p);
  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
};

class MarkFromRootsVerifyClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _verification_bm;
  CMSBitMap*     _cms_bm;
  CMSMarkStack*  _mark_stack;
  HeapWord*      _finger;
  PushAndMarkVerifyClosure _pam_verify_closure;
 public:
  MarkFromRootsVerifyClosure(CMSCollector* collector, MemRegion span,
                             CMSBitMap* verification_bm,
                             CMSBitMap* cms_bm,
                             CMSMarkStack* mark_stack);
  bool do_bit(size_t offset);
  void reset(HeapWord* addr);
};


// This closure is used to check that a certain set of bits is
// "empty" (i.e. the bit vector doesn't have any 1-bits).
class FalseBitMapClosure: public BitMapClosure {
 public:
  bool do_bit(size_t offset) {
    guarantee(false, "Should not have a 1 bit");
    return true;
  }
};

// This closure is used during the second checkpointing phase
// to rescan the marked objects on the dirty cards in the mod
// union table and the card table proper. It's invoked via
// MarkFromDirtyCardsClosure below. It uses either
// [Par_]MarkRefsIntoAndScanClosure (Par_ in the parallel case)
// declared in genOopClosures.hpp to accomplish some of its work.
// In the parallel case the bitMap is shared, so access to
// it needs to be suitably synchronized for updates by embedded
// closures that update it; however, this closure itself only
// reads the bit_map and because it is idempotent, is immune to
// reading stale values.
class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure {
  #ifdef ASSERT
    CMSCollector*   _collector;
    MemRegion       _span;
    union {
      CMSMarkStack* _mark_stack;
      OopTaskQueue* _work_queue;
    };
  #endif // ASSERT
  bool       _parallel;
  CMSBitMap* _bit_map;
  union {
    MarkRefsIntoAndScanClosure*     _scan_closure;
    Par_MarkRefsIntoAndScanClosure* _par_scan_closure;
  };

 public:
  ScanMarkedObjectsAgainClosure(CMSCollector* collector,
                                MemRegion span,
                                ReferenceProcessor* rp,
                                CMSBitMap* bit_map,
                                CMSMarkStack* mark_stack,
                                CMSMarkStack* revisit_stack,
                                MarkRefsIntoAndScanClosure* cl):
    #ifdef ASSERT
      _collector(collector),
      _span(span),
      _mark_stack(mark_stack),
    #endif // ASSERT
    _parallel(false),
    _bit_map(bit_map),
    _scan_closure(cl) { }

  ScanMarkedObjectsAgainClosure(CMSCollector* collector,
                                MemRegion span,
                                ReferenceProcessor* rp,
                                CMSBitMap* bit_map,
                                OopTaskQueue* work_queue,
                                CMSMarkStack* revisit_stack,
                                Par_MarkRefsIntoAndScanClosure* cl):
    #ifdef ASSERT
      _collector(collector),
      _span(span),
      _work_queue(work_queue),
    #endif // ASSERT
    _parallel(true),
    _bit_map(bit_map),
    _par_scan_closure(cl) { }

  void do_object(oop obj) {
    guarantee(false, "Call do_object_b(oop, MemRegion) instead");
  }
  bool do_object_b(oop obj) {
    guarantee(false, "Call do_object_b(oop, MemRegion) form instead");
    return false;
  }
  bool do_object_bm(oop p, MemRegion mr);
};

// This closure is used during the second checkpointing phase
// to rescan the marked objects on the dirty cards in the mod
// union table and the card table proper. It invokes
// ScanMarkedObjectsAgainClosure above to accomplish much of its work.
// In the parallel case, the bit map is shared and requires
// synchronized access.
// This closure is used during the second checkpointing phase
// to rescan the marked objects on the dirty cards in the mod
// union table and the card table proper. It invokes
// ScanMarkedObjectsAgainClosure above to accomplish much of its work.
// In the parallel case, the bit map is shared and requires
// synchronized access.
class MarkFromDirtyCardsClosure: public MemRegionClosure {
  CompactibleFreeListSpace*     _space;
  ScanMarkedObjectsAgainClosure _scan_cl;
  size_t                        _num_dirty_cards;

 public:
  MarkFromDirtyCardsClosure(CMSCollector* collector,
                            MemRegion span,
                            CompactibleFreeListSpace* space,
                            CMSBitMap* bit_map,
                            CMSMarkStack* mark_stack,
                            CMSMarkStack* revisit_stack,
                            MarkRefsIntoAndScanClosure* cl):
    _space(space),
    _num_dirty_cards(0),
    _scan_cl(collector, span, collector->ref_processor(), bit_map,
             mark_stack, revisit_stack, cl) { }

  MarkFromDirtyCardsClosure(CMSCollector* collector,
                            MemRegion span,
                            CompactibleFreeListSpace* space,
                            CMSBitMap* bit_map,
                            OopTaskQueue* work_queue,
                            CMSMarkStack* revisit_stack,
                            Par_MarkRefsIntoAndScanClosure* cl):
    _space(space),
    _num_dirty_cards(0),
    _scan_cl(collector, span, collector->ref_processor(), bit_map,
             work_queue, revisit_stack, cl) { }

  void do_MemRegion(MemRegion mr);
  void set_space(CompactibleFreeListSpace* space) { _space = space; }
  size_t num_dirty_cards() { return _num_dirty_cards; }
};

// This closure is used in the non-product build to check
// that there are no MemRegions with a certain property.
class FalseMemRegionClosure: public MemRegionClosure {
  void do_MemRegion(MemRegion mr) {
    guarantee(!mr.is_empty(), "Shouldn't be empty");
    guarantee(false, "Should never be here");
  }
};

// This closure is used during the precleaning phase
// to "carefully" rescan marked objects on dirty cards.
// It uses MarkRefsIntoAndScanClosure declared in genOopClosures.hpp
// to accomplish some of its work.
class ScanMarkedObjectsAgainCarefullyClosure: public ObjectClosureCareful {
  CMSCollector*               _collector;
  MemRegion                   _span;
  bool                        _yield;
  Mutex*                      _freelistLock;
  CMSBitMap*                  _bitMap;
  CMSMarkStack*               _markStack;
  MarkRefsIntoAndScanClosure* _scanningClosure;

 public:
  ScanMarkedObjectsAgainCarefullyClosure(CMSCollector* collector,
                                         MemRegion span,
                                         CMSBitMap* bitMap,
                                         CMSMarkStack* markStack,
                                         CMSMarkStack* revisitStack,
                                         MarkRefsIntoAndScanClosure* cl,
                                         bool should_yield):
    _collector(collector),
    _span(span),
    _yield(should_yield),
    _bitMap(bitMap),
    _markStack(markStack),
    _scanningClosure(cl) {
  }

  void do_object(oop p) {
    guarantee(false, "call do_object_careful instead");
  }

  size_t do_object_careful(oop p) {
    guarantee(false, "Unexpected caller");
    return 0;
  }

  size_t do_object_careful_m(oop p, MemRegion mr);

  void setFreelistLock(Mutex* m) {
    _freelistLock = m;
    _scanningClosure->set_freelistLock(m);
  }

 private:
  inline bool do_yield_check();
  void do_yield_work();
};
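// Illustrative sketch (the iterator and accessor shown are assumptions,
// not declared in this header): during precleaning the closure above is
// applied to objects on dirty cards while mutators are still running,
// so the walk must be "careful"; do_object_careful_m() returns the size
// of the object scanned, or 0 when it could not complete, in which case
// the caller is expected to stop or retry, e.g.
//
//   cl.setFreelistLock(space->freelistLock());          // assumed accessor
//   space->object_iterate_careful_m(dirty_region, &cl); // assumed iterator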
class SurvivorSpacePrecleanClosure: public ObjectClosureCareful {
  CMSCollector*       _collector;
  MemRegion           _span;
  bool                _yield;
  CMSBitMap*          _bit_map;
  CMSMarkStack*       _mark_stack;
  PushAndMarkClosure* _scanning_closure;
  unsigned int        _before_count;

 public:
  SurvivorSpacePrecleanClosure(CMSCollector* collector,
                               MemRegion span,
                               CMSBitMap* bit_map,
                               CMSMarkStack* mark_stack,
                               PushAndMarkClosure* cl,
                               unsigned int before_count,
                               bool should_yield):
    _collector(collector),
    _span(span),
    _yield(should_yield),
    _bit_map(bit_map),
    _mark_stack(mark_stack),
    _scanning_closure(cl),
    _before_count(before_count)
  { }

  void do_object(oop p) {
    guarantee(false, "call do_object_careful instead");
  }

  size_t do_object_careful(oop p);

  size_t do_object_careful_m(oop p, MemRegion mr) {
    guarantee(false, "Unexpected caller");
    return 0;
  }

 private:
  inline void do_yield_check();
  void do_yield_work();
};
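// Illustrative sketch of the yield protocol shared by the "careful"
// closures above (a sketch of the expected shape, not the product
// implementation): work loops periodically call do_yield_check(),
// which defers to do_yield_work() only when yielding is both enabled
// for this closure and requested by the collector, e.g.
//
//   inline void SurvivorSpacePrecleanClosure::do_yield_check() {
//     if (ConcurrentMarkSweepThread::should_yield() &&
//         !_collector->foregroundGCIsActive() &&
//         _yield) {
//       do_yield_work();  // release locks, pause briefly, reacquire
//     }
//   }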
// This closure is used to accomplish the sweeping work
// after the second checkpoint but before the concurrent reset
// phase.
//
// Terminology
//   left hand chunk (LHC)  - block of one or more chunks currently being
//     coalesced. The LHC is available for coalescing with a new chunk.
//   right hand chunk (RHC) - block that is currently being swept that is
//     free or garbage that can be coalesced with the LHC.
// _inFreeRange is true if there is currently a LHC.
// _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk.
// _freeRangeInFreeLists is true if the LHC is in the free lists.
// _freeFinger is the address of the current LHC.
class SweepClosure: public BlkClosureCareful {
  CMSCollector*                  _collector;    // collector doing the work
  ConcurrentMarkSweepGeneration* _g;            // generation being swept
  CompactibleFreeListSpace*      _sp;           // space being swept
  HeapWord*                      _limit;
  Mutex*                         _freelistLock; // free list lock (in space)
  CMSBitMap*                     _bitMap;       // marking bit map (in
                                                // generation)
  bool                           _inFreeRange;  // indicates if we are in the
                                                // midst of a free run
  bool                           _freeRangeInFreeLists;
                                       // Often, we have just found
                                       // a free chunk and started
                                       // a new free range; we do not
                                       // eagerly remove this chunk from
                                       // the free lists unless there is
                                       // a possibility of coalescing.
                                       // When true, this flag indicates
                                       // that the _freeFinger below
                                       // points to a potentially free chunk
                                       // that may still be in the free lists.
  bool                           _lastFreeRangeCoalesced;
                                       // free range contains chunks
                                       // coalesced
  bool                           _yield;
                                       // Whether sweeping should be
                                       // done with yields. For instance
                                       // when done by the foreground
                                       // collector we shouldn't yield.
  HeapWord*                      _freeFinger;   // when _inFreeRange is set,
                                                // the pointer to the "left
                                                // hand chunk"
  size_t                         _freeRangeSize;
                                       // When _inFreeRange is set, this
                                       // indicates the accumulated size
                                       // of the "left hand chunk".
  NOT_PRODUCT(
    size_t                       _numObjectsFreed;
    size_t                       _numWordsFreed;
    size_t                       _numObjectsLive;
    size_t                       _numWordsLive;
    size_t                       _numObjectsAlreadyFree;
    size_t                       _numWordsAlreadyFree;
    FreeChunk*                   _last_fc;
  )
 private:
  // Code that is common to a free chunk or garbage when
  // encountered during sweeping.
  void doPostIsFreeOrGarbageChunk(FreeChunk* fc, size_t chunkSize);
  // Process a free chunk during sweeping.
  void doAlreadyFreeChunk(FreeChunk* fc);
  // Process a garbage chunk during sweeping.
  size_t doGarbageChunk(FreeChunk* fc);
  // Process a live chunk during sweeping.
  size_t doLiveChunk(FreeChunk* fc);

  // Accessors.
  HeapWord* freeFinger() const            { return _freeFinger; }
  void set_freeFinger(HeapWord* v)        { _freeFinger = v; }
  size_t freeRangeSize() const            { return _freeRangeSize; }
  void set_freeRangeSize(size_t v)        { _freeRangeSize = v; }
  bool inFreeRange() const                { return _inFreeRange; }
  void set_inFreeRange(bool v)            { _inFreeRange = v; }
  bool lastFreeRangeCoalesced() const     { return _lastFreeRangeCoalesced; }
  void set_lastFreeRangeCoalesced(bool v) { _lastFreeRangeCoalesced = v; }
  bool freeRangeInFreeLists() const       { return _freeRangeInFreeLists; }
  void set_freeRangeInFreeLists(bool v)   { _freeRangeInFreeLists = v; }

  // Initialize a free range.
  void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists);
  // Return this chunk to the free lists.
  void flushCurFreeChunk(HeapWord* chunk, size_t size);

  // Check if we should yield and do so when necessary.
  inline void do_yield_check(HeapWord* addr);

  // Yield.
  void do_yield_work(HeapWord* addr);

  // Debugging/Printing
  void record_free_block_coalesced(FreeChunk* fc) const PRODUCT_RETURN;

 public:
  SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
               CMSBitMap* bitMap, bool should_yield);
  ~SweepClosure();

  size_t do_blk_careful(HeapWord* addr);
};

// Closures related to weak reference processing.

// During CMS' weak reference processing, this is a
// work-routine/closure used to complete transitive
// marking of objects as live after a certain point
// in which an initial set has been completely accumulated.
// This closure is currently used both during the final
// remark stop-world phase, as well as during the concurrent
// precleaning of the discovered reference lists.
class CMSDrainMarkingStackClosure: public VoidClosure {
  CMSCollector*        _collector;
  MemRegion            _span;
  CMSMarkStack*        _mark_stack;
  CMSBitMap*           _bit_map;
  CMSKeepAliveClosure* _keep_alive;
  bool                 _concurrent_precleaning;
 public:
  CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span,
                              CMSBitMap* bit_map, CMSMarkStack* mark_stack,
                              CMSKeepAliveClosure* keep_alive,
                              bool cpc):
    _collector(collector),
    _span(span),
    _bit_map(bit_map),
    _mark_stack(mark_stack),
    _keep_alive(keep_alive),
    _concurrent_precleaning(cpc) {
    assert(_concurrent_precleaning == _keep_alive->concurrent_precleaning(),
           "Mismatch");
  }

  void do_void();
};

// A parallel version of CMSDrainMarkingStackClosure above.
class CMSParDrainMarkingStackClosure: public VoidClosure {
  CMSCollector*                 _collector;
  MemRegion                     _span;
  OopTaskQueue*                 _work_queue;
  CMSBitMap*                    _bit_map;
  CMSInnerParMarkAndPushClosure _mark_and_push;

 public:
  CMSParDrainMarkingStackClosure(CMSCollector* collector,
                                 MemRegion span, CMSBitMap* bit_map,
                                 CMSMarkStack* revisit_stack,
                                 OopTaskQueue* work_queue):
    _collector(collector),
    _span(span),
    _bit_map(bit_map),
    _work_queue(work_queue),
    _mark_and_push(collector, span, bit_map, revisit_stack, work_queue) { }

  void trim_queue(uint max);
  void do_void();
};
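// Illustrative sketch (assumed call shape; argument names hypothetical):
// during reference processing, the drain closures above serve as the
// "complete transitive closure" step run after each batch of referents
// has been kept alive, e.g.
//
//   CMSKeepAliveClosure keep_alive(/* ... */);
//   CMSDrainMarkingStackClosure complete_gc(collector, span, bit_map,
//                                           mark_stack, &keep_alive,
//                                           false /* cpc: not precleaning */);
//   rp->process_discovered_references(&is_alive, &keep_alive,
//                                     &complete_gc /* , ... */);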
// Allow yielding or short-circuiting of reference list
// precleaning work.
class CMSPrecleanRefsYieldClosure: public YieldClosure {
  CMSCollector* _collector;
  void do_yield_work();
 public:
  CMSPrecleanRefsYieldClosure(CMSCollector* collector):
    _collector(collector) {}
  virtual bool should_return();
};


// Convenience class that locks free list locks for a given CMS collector.
class FreelistLocker: public StackObj {
 private:
  CMSCollector* _collector;
 public:
  FreelistLocker(CMSCollector* collector):
    _collector(collector) {
    _collector->getFreelistLocks();
  }

  ~FreelistLocker() {
    _collector->releaseFreelistLocks();
  }
};

// Mark all dead objects in a given space.
class MarkDeadObjectsClosure: public BlkClosure {
  const CMSCollector*             _collector;
  const CompactibleFreeListSpace* _sp;
  CMSBitMap*                      _live_bit_map;
  CMSBitMap*                      _dead_bit_map;
 public:
  MarkDeadObjectsClosure(const CMSCollector* collector,
                         const CompactibleFreeListSpace* sp,
                         CMSBitMap* live_bit_map,
                         CMSBitMap* dead_bit_map):
    _collector(collector),
    _sp(sp),
    _live_bit_map(live_bit_map),
    _dead_bit_map(dead_bit_map) {}
  size_t do_blk(HeapWord* addr);
};

class TraceCMSMemoryManagerStats : public TraceMemoryManagerStats {
 public:
  TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase);
  TraceCMSMemoryManagerStats();
};


#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP