/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP

#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gSpaceCounters.hpp"
#include "gc_implementation/shared/gcStats.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "gc_implementation/shared/generationCounters.hpp"
#include "memory/cardGeneration.hpp"
#include "memory/freeBlockDictionary.hpp"
#include "memory/iterator.hpp"
#include "memory/space.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memoryService.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/taskqueue.hpp"
#include "utilities/yieldingWorkgroup.hpp"

// ConcurrentMarkSweepGeneration is in support of a concurrent
// mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Schenker
// style. We assume, for now, that this generation is always the
// seniormost generation and, for simplicity in the first implementation,
// that it is a single compactible space. Neither of these restrictions
// appears essential; they will be relaxed in the future when more time is
// available to implement the greater generality (and there's a need for it).
//
// Concurrent mode failures are currently handled by
// means of a sliding mark-compact.

class AdaptiveSizePolicy;
class CMSConcMarkingTask;
class CMSGCAdaptivePolicyCounters;
class CMSTracer;
class ConcurrentGCTimer;
class ConcurrentMarkSweepGeneration;
class ConcurrentMarkSweepPolicy;
class ConcurrentMarkSweepThread;
class CompactibleFreeListSpace;
class FreeChunk;
class PromotionInfo;
class ScanMarkedObjectsAgainCarefullyClosure;
class TenuredGeneration;
class SerialOldTracer;

// A generic CMS bit map. It's the basis for both the CMS marking bit map
// as well as for the mod union table (in each case only a subset of the
// methods are used). This is essentially a wrapper around the BitMap class,
// with one bit per (1 << _shifter) HeapWords. (For the marking bit map we
// have _shifter == 0, and for the mod union table we have
// _shifter == CardTableModRefBS::card_shift - LogHeapWordSize.)
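//
// For example (illustrative, assuming the usual 512-byte cards and
// 64-bit HeapWords): card_shift == 9 and LogHeapWordSize == 3, so the
// mod union table uses _shifter == 6 and one bit covers 64 HeapWords
// (one card), while the marking bit map's _shifter == 0 gives one bit
// per HeapWord.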
// XXX 64-bit issues in BitMap?
class CMSBitMap VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;

  HeapWord*    _bmStartWord;   // base address of range covered by map
  size_t       _bmWordSize;    // map size (in #HeapWords covered)
  const int    _shifter;       // shifts to convert HeapWord to bit position
  VirtualSpace _virtual_space; // underlying the bit map
  BitMap       _bm;            // the bit map itself
 public:
  Mutex* const _lock;          // mutex protecting _bm

 public:
  // constructor
  CMSBitMap(int shifter, int mutex_rank, const char* mutex_name);

  // allocates the actual storage for the map
  bool allocate(MemRegion mr);
  // field getter
  Mutex* lock() const { return _lock; }
  // locking verifier convenience function
  void assert_locked() const PRODUCT_RETURN;

  // inquiries
  HeapWord* startWord()   const { return _bmStartWord; }
  size_t    sizeInWords() const { return _bmWordSize; }
  size_t    sizeInBits()  const { return _bm.size(); }
  // the following is one past the last word in space
  HeapWord* endWord()     const { return _bmStartWord + _bmWordSize; }

  // reading marks
  bool isMarked(HeapWord* addr) const;
  bool par_isMarked(HeapWord* addr) const; // no locking checks
  bool isUnmarked(HeapWord* addr) const;
  bool isAllClear() const;

  // writing marks
  void mark(HeapWord* addr);
  // For marking by parallel GC threads;
  // returns true if we did, false if another thread did
  bool par_mark(HeapWord* addr);

  void mark_range(MemRegion mr);
  void par_mark_range(MemRegion mr);
  void mark_large_range(MemRegion mr);
  void par_mark_large_range(MemRegion mr);
  void par_clear(HeapWord* addr);  // For unmarking by parallel GC threads.
  void clear_range(MemRegion mr);
  void par_clear_range(MemRegion mr);
  void clear_large_range(MemRegion mr);
  void par_clear_large_range(MemRegion mr);
  void clear_all();
  void clear_all_incrementally();  // Not yet implemented!!

  NOT_PRODUCT(
    // checks the memory region for validity
    void region_invariant(MemRegion mr);
  )

  // iteration
  void iterate(BitMapClosure* cl) {
    _bm.iterate(cl);
  }
  void iterate(BitMapClosure* cl, HeapWord* left, HeapWord* right);
  void dirty_range_iterate_clear(MemRegionClosure* cl);
  void dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl);

  // auxiliary support for iteration
  HeapWord* getNextMarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextMarkedWordAddress(HeapWord* start_addr,
                                     HeapWord* end_addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* start_addr,
                                       HeapWord* end_addr) const;
  MemRegion getAndClearMarkedRegion(HeapWord* addr);
  MemRegion getAndClearMarkedRegion(HeapWord* start_addr,
                                    HeapWord* end_addr);

  // conversion utilities
  HeapWord* offsetToHeapWord(size_t offset) const;
  size_t    heapWordToOffset(HeapWord* addr) const;
  size_t    heapWordDiffToOffsetDiff(size_t diff) const;

  void print_on_error(outputStream* st, const char* prefix) const;

  // debugging
  // is this address range covered by the bit-map?
  NOT_PRODUCT(
    bool covers(MemRegion mr) const;
    bool covers(HeapWord* start, size_t size = 0) const;
  )
  void verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) PRODUCT_RETURN;
};
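
// A minimal usage sketch (illustrative only, not part of the interface
// above): walking the marked words in [start, end), assuming the caller
// holds lock() where required:
//
//   for (HeapWord* addr = bm->getNextMarkedWordAddress(start, end);
//        addr < end;
//        addr = bm->getNextMarkedWordAddress(addr + 1, end)) {
//     ... process the mark at addr ...
//   }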

// Represents a marking stack used by the CMS collector.
// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
class CMSMarkStack: public CHeapObj<mtGC> {
  friend class CMSCollector;   // To get at expansion stats further below.

  VirtualSpace _virtual_space;    // Space for the stack
  oop*   _base;                   // Bottom of stack
  size_t _index;                  // One more than last occupied index
  size_t _capacity;               // Max #elements
  Mutex  _par_lock;               // An advisory lock used in case of parallel access
  NOT_PRODUCT(size_t _max_depth;) // Max depth plumbed during run

 protected:
  size_t _hit_limit;     // We hit max stack size limit
  size_t _failed_double; // We failed expansion before hitting limit

 public:
  CMSMarkStack():
    _par_lock(Mutex::event, "CMSMarkStack._par_lock", true),
    _hit_limit(0),
    _failed_double(0) {}

  bool allocate(size_t size);

  size_t capacity() const { return _capacity; }

  oop pop() {
    if (!isEmpty()) {
      return _base[--_index];
    }
    return NULL;
  }

  bool push(oop ptr) {
    if (isFull()) {
      return false;
    } else {
      _base[_index++] = ptr;
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
      return true;
    }
  }

  bool isEmpty() const { return _index == 0; }
  bool isFull()  const {
    assert(_index <= _capacity, "buffer overflow");
    return _index == _capacity;
  }

  size_t length() { return _index; }

  // "Parallel versions" of some of the above
  oop par_pop() {
    // lock and pop
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return pop();
  }

  bool par_push(oop ptr) {
    // lock and push
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return push(ptr);
  }

  // Forcibly reset the stack, losing all of its contents.
  void reset() {
    _index = 0;
  }

  // Expand the stack, typically in response to an overflow condition.
  void expand();

  // Compute the least valued stack element.
  oop least_value(HeapWord* low) {
    oop least = (oop)low;
    for (size_t i = 0; i < _index; i++) {
      least = MIN2(least, _base[i]);
    }
    return least;
  }

  // Exposed here to allow stack expansion in the parallel case.
  Mutex* par_lock() { return &_par_lock; }
};
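
// A minimal usage sketch (illustrative only): an overflow-tolerant push
// during marking might try expand() once and retry before falling back to
// an overflow protocol such as CMSCollector's overflow list (see below):
//
//   if (!mark_stack->push(obj)) {
//     mark_stack->expand();   // may do nothing once the size limit is hit
//     if (!mark_stack->push(obj)) {
//       ... record the overflow and handle obj separately ...
//     }
//   }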

class CardTableRS;
class CMSParGCThreadState;

class ModUnionClosure: public MemRegionClosure {
 protected:
  CMSBitMap* _t;
 public:
  ModUnionClosure(CMSBitMap* t): _t(t) { }
  void do_MemRegion(MemRegion mr);
};

class ModUnionClosurePar: public ModUnionClosure {
 public:
  ModUnionClosurePar(CMSBitMap* t): ModUnionClosure(t) { }
  void do_MemRegion(MemRegion mr);
};

// Survivor Chunk Array in support of parallelization of
// Survivor Space rescan.
class ChunkArray: public CHeapObj<mtGC> {
  size_t _index;
  size_t _capacity;
  size_t _overflows;
  HeapWord** _array; // storage for array

 public:
  ChunkArray() : _index(0), _capacity(0), _overflows(0), _array(NULL) {}
  ChunkArray(HeapWord** a, size_t c):
    _index(0), _capacity(c), _overflows(0), _array(a) {}

  HeapWord** array() { return _array; }
  void set_array(HeapWord** a) { _array = a; }

  size_t capacity() { return _capacity; }
  void set_capacity(size_t c) { _capacity = c; }

  size_t end() {
    assert(_index <= capacity(),
           err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT "): out of bounds",
                   _index, _capacity));
    return _index;
  } // exclusive

  HeapWord* nth(size_t n) {
    assert(n < end(), "Out of bounds access");
    return _array[n];
  }

  void reset() {
    _index = 0;
    if (_overflows > 0 && PrintCMSStatistics > 1) {
      warning("CMS: ChunkArray[" SIZE_FORMAT "] overflowed " SIZE_FORMAT " times",
              _capacity, _overflows);
    }
    _overflows = 0;
  }

  void record_sample(HeapWord* p, size_t sz) {
    // For now we do not do anything with the size
    if (_index < _capacity) {
      _array[_index++] = p;
    } else {
      ++_overflows;
      assert(_index == _capacity,
             err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT
                     "): out of bounds at overflow#" SIZE_FORMAT,
                     _index, _capacity, _overflows));
    }
  }
};

//
// Timing, allocation and promotion statistics for gc scheduling and incremental
// mode pacing. Most statistics are exponential averages.
//
class CMSStats VALUE_OBJ_CLASS_SPEC {
 private:
  ConcurrentMarkSweepGeneration* const _cms_gen; // The cms (old) gen.

  // The following are exponential averages with factor alpha
  // (alpha is a percentage in [0, 100]):
  //    avg = ((100 - alpha) * avg + alpha * cur_sample) / 100
  //
  // The durations measure:  end_time[n] - start_time[n]
  // The periods measure:    start_time[n] - start_time[n-1]
  //
  // The cms period and duration include only concurrent collections; time spent
  // in foreground cms collections due to System.gc() or because of a failure to
  // keep up is not included.
  //
  // There are 3 alphas to "bootstrap" the statistics. The _saved_alpha is the
  // real value, but is used only after the first period. A value of 100 is
  // used for the first sample so it gets the entire weight.
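  //
  // Worked example (illustrative): with alpha == 25, an existing average
  // of 4.0 and a new sample of 8.0 give
  //    avg' = (75 * 4.0 + 25 * 8.0) / 100 = 5.0
  // For the very first sample alpha is effectively 100, so avg' == cur_sample.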
  unsigned int _saved_alpha; // 0-100
  unsigned int _gc0_alpha;
  unsigned int _cms_alpha;

  double _gc0_duration;
  double _gc0_period;
  size_t _gc0_promoted;           // bytes promoted per gc0
  double _cms_duration;
  double _cms_duration_pre_sweep; // time from initiation to start of sweep
  double _cms_period;
  size_t _cms_allocated;          // bytes of direct allocation per gc0 period

  // Timers.
  elapsedTimer _cms_timer;
  TimeStamp    _gc0_begin_time;
  TimeStamp    _cms_begin_time;
  TimeStamp    _cms_end_time;

  // Snapshots of the amount used in the CMS generation.
  size_t _cms_used_at_gc0_begin;
  size_t _cms_used_at_gc0_end;
  size_t _cms_used_at_cms_begin;

  // Used to prevent the duty cycle from being reduced in the middle of a cms
  // cycle.
  bool _allow_duty_cycle_reduction;

  enum {
    _GC0_VALID = 0x1,
    _CMS_VALID = 0x2,
    _ALL_VALID = _GC0_VALID | _CMS_VALID
  };

  unsigned int _valid_bits;

 protected:
  // In support of adjusting of cms trigger ratios based on history
  // of concurrent mode failure.
  double cms_free_adjustment_factor(size_t free) const;
  void   adjust_cms_free_adjustment_factor(bool fail, size_t free);

 public:
  CMSStats(ConcurrentMarkSweepGeneration* cms_gen,
           unsigned int alpha = CMSExpAvgFactor);

  // Whether or not the statistics contain valid data; higher level statistics
  // cannot be called until this returns true (they require at least one young
  // gen and one cms cycle to have completed).
  bool valid() const;

  // Record statistics.
  void record_gc0_begin();
  void record_gc0_end(size_t cms_gen_bytes_used);
  void record_cms_begin();
  void record_cms_end();

  // Allow management of the cms timer, which must be stopped/started around
  // yield points.
  elapsedTimer& cms_timer()    { return _cms_timer; }
  void start_cms_timer()       { _cms_timer.start(); }
  void stop_cms_timer()        { _cms_timer.stop(); }

  // Basic statistics; units are seconds or bytes.
  double gc0_period() const    { return _gc0_period; }
  double gc0_duration() const  { return _gc0_duration; }
  size_t gc0_promoted() const  { return _gc0_promoted; }
  double cms_period() const    { return _cms_period; }
  double cms_duration() const  { return _cms_duration; }
  size_t cms_allocated() const { return _cms_allocated; }

  size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end; }

  // Seconds since the last background cms cycle began or ended.
  double cms_time_since_begin() const;
  double cms_time_since_end() const;

  // Higher level statistics--caller must check that valid() returns true before
  // calling.

  // Returns bytes promoted per second of wall clock time.
  double promotion_rate() const;

  // Returns bytes directly allocated per second of wall clock time.
  double cms_allocation_rate() const;

  // Rate at which space in the cms generation is being consumed (sum of the
  // above two).
  double cms_consumption_rate() const;

  // Returns an estimate of the number of seconds until the cms generation will
  // fill up, assuming no collection work is done.
  double time_until_cms_gen_full() const;

  // Returns an estimate of the number of seconds remaining until
  // the cms generation collection should start.
  double time_until_cms_start() const;

  // End of higher level statistics.

  // Debugging.
  void print_on(outputStream* st) const PRODUCT_RETURN;
  void print() const { print_on(gclog_or_tty); }
};
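
// Illustrative numbers for the rate statistics above (assumed values, not
// measurements): a promotion_rate() of 10 MB/s plus a cms_allocation_rate()
// of 2 MB/s yield a cms_consumption_rate() of 12 MB/s; with 600 MB free in
// the cms generation, time_until_cms_gen_full() would be roughly 50 seconds.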

// A closure related to weak references processing which
// we embed in the CMSCollector, since we need to pass
// it to the reference processor for secondary filtering
// of references based on reachability of referent;
// see role of _is_alive_non_header closure in the
// ReferenceProcessor class.
// For objects in the CMS generation, this closure checks
// if the object is "live" (reachable). Used in weak
// reference processing.
class CMSIsAliveClosure: public BoolObjectClosure {
  const MemRegion  _span;
  const CMSBitMap* _bit_map;

  friend class CMSCollector;
 public:
  CMSIsAliveClosure(MemRegion span,
                    CMSBitMap* bit_map):
    _span(span),
    _bit_map(bit_map) {
    assert(!span.is_empty(), "Empty span could spell trouble");
  }

  bool do_object_b(oop obj);
};


// Implements AbstractRefProcTaskExecutor for CMS.
class CMSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
 public:

  CMSRefProcTaskExecutor(CMSCollector& collector)
    : _collector(collector)
  { }

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
 private:
  CMSCollector& _collector;
};


class CMSCollector: public CHeapObj<mtGC> {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweepGeneration;
  friend class CompactibleFreeListSpace;
  friend class CMSParMarkTask;
  friend class CMSParInitialMarkTask;
  friend class CMSParRemarkTask;
  friend class CMSConcMarkingTask;
  friend class CMSRefProcTaskProxy;
  friend class CMSRefProcTaskExecutor;
  friend class ScanMarkedObjectsAgainCarefullyClosure;  // for sampling eden
  friend class SurvivorSpacePrecleanClosure;            // --- ditto -------
  friend class PushOrMarkClosure;             // to access _restart_addr
  friend class Par_PushOrMarkClosure;         // to access _restart_addr
  friend class MarkFromRootsClosure;          //  -- ditto --
                                              // ... and for clearing cards
  friend class Par_MarkFromRootsClosure;      // to access _restart_addr
                                              // ... and for clearing cards
  friend class Par_ConcMarkingClosure;        // to access _restart_addr etc.
  friend class MarkFromRootsVerifyClosure;    // to access _restart_addr
  friend class PushAndMarkVerifyClosure;      //  -- ditto --
  friend class MarkRefsIntoAndScanClosure;    // to access _overflow_list
  friend class PushAndMarkClosure;            //  -- ditto --
  friend class Par_PushAndMarkClosure;        //  -- ditto --
  friend class CMSKeepAliveClosure;           //  -- ditto --
  friend class CMSDrainMarkingStackClosure;   //  -- ditto --
  friend class CMSInnerParMarkAndPushClosure; //  -- ditto --
  NOT_PRODUCT(friend class ScanMarkedObjectsAgainClosure;) // assertion on _overflow_list
  friend class ReleaseForegroundGC;  // to access _foregroundGCShouldWait
  friend class VM_CMS_Operation;
  friend class VM_CMS_Initial_Mark;
  friend class VM_CMS_Final_Remark;
  friend class TraceCMSMemoryManagerStats;

 private:
  jlong _time_of_last_gc;
  void update_time_of_last_gc(jlong now) {
    _time_of_last_gc = now;
  }

  OopTaskQueueSet* _task_queues;

  // Overflow list of grey objects, threaded through mark-word
  // Manipulated with CAS in the parallel/multi-threaded case.
  oop _overflow_list;
  // The following array-pair keeps track of mark words
  // displaced for accommodating overflow list above.
  // This code will likely be revisited under RFE#4922830.
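  //
  // Sketch of the sequential push protocol, for illustration only (the real
  // work happens in push_on_overflow_list() and friends declared below):
  //   preserve_mark_if_necessary(p);        // save a displaced mark word
  //   p->set_mark(markOop(_overflow_list)); // thread p through its mark word
  //   _overflow_list = p;                   // a CAS loop in the parallel case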
  Stack<oop, mtGC>     _preserved_oop_stack;
  Stack<markOop, mtGC> _preserved_mark_stack;

  int* _hash_seed;

  // In support of multi-threaded concurrent phases
  YieldingFlexibleWorkGang* _conc_workers;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Initialization Errors
  bool _completed_initialization;

  // In support of ExplicitGCInvokesConcurrent
  static bool _full_gc_requested;
  static GCCause::Cause _full_gc_cause;
  unsigned int _collection_count_start;

  // Should we unload classes this concurrent cycle?
  bool _should_unload_classes;
  unsigned int _concurrent_cycles_since_last_unload;
  unsigned int concurrent_cycles_since_last_unload() const {
    return _concurrent_cycles_since_last_unload;
  }
  // Did we (allow) unload classes in the previous concurrent cycle?
  bool unloaded_classes_last_cycle() const {
    return concurrent_cycles_since_last_unload() == 0;
  }
  // Root scanning options for perm gen
  int _roots_scanning_options;
  int roots_scanning_options() const      { return _roots_scanning_options; }
  void add_root_scanning_option(int o)    { _roots_scanning_options |= o;   }
  void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o;  }

  // Verification support
  CMSBitMap _verification_mark_bm;
  void verify_after_remark_work_1();
  void verify_after_remark_work_2();

  // True if any verification flag is on.
  bool _verifying;
  bool verifying() const { return _verifying; }
  void set_verifying(bool v) { _verifying = v; }

  // Collector policy
  ConcurrentMarkSweepPolicy* _collector_policy;
  ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }

  void set_did_compact(bool v);

  // XXX Move these to CMSStats ??? FIX ME !!!
  elapsedTimer _inter_sweep_timer;  // Time between sweeps
  elapsedTimer _intra_sweep_timer;  // Time _in_ sweeps
  // Padded decaying average estimates of the above
  AdaptivePaddedAverage _inter_sweep_estimate;
  AdaptivePaddedAverage _intra_sweep_estimate;

  CMSTracer* _gc_tracer_cm;
  ConcurrentGCTimer* _gc_timer_cm;

  bool _cms_start_registered;

  GCHeapSummary _last_heap_summary;
  MetaspaceSummary _last_metaspace_summary;

  void register_gc_start(GCCause::Cause cause);
  void register_gc_end();
  void save_heap_summary();
  void report_heap_summary(GCWhen::Type when);

 protected:
  ConcurrentMarkSweepGeneration* _cmsGen; // Old gen (CMS)
  MemRegion                      _span;   // Span covering above two
  CardTableRS*                   _ct;     // Card table

  // CMS marking support structures
  CMSBitMap     _markBitMap;
  CMSBitMap     _modUnionTable;
  CMSMarkStack  _markStack;

  HeapWord*     _restart_addr; // In support of marking stack overflow
  void          lower_restart_addr(HeapWord* low);

  // Counters in support of marking stack / work queue overflow handling:
  // a non-zero value indicates certain types of overflow events during
  // the current CMS cycle and could lead to stack resizing efforts at
  // an opportune future time.
  size_t        _ser_pmc_preclean_ovflw;
  size_t        _ser_pmc_remark_ovflw;
  size_t        _par_pmc_remark_ovflw;
  size_t        _ser_kac_preclean_ovflw;
  size_t        _ser_kac_ovflw;
  size_t        _par_kac_ovflw;
  NOT_PRODUCT(ssize_t _num_par_pushes;)

  // ("Weak") Reference processing support.
  ReferenceProcessor* _ref_processor;
  CMSIsAliveClosure   _is_alive_closure;
  // Keep this textually after _markBitMap and _span; c'tor dependency.

  ConcurrentMarkSweepThread* _cmsThread;  // The thread doing the work
  ModUnionClosure    _modUnionClosure;
  ModUnionClosurePar _modUnionClosurePar;

  // CMS abstract state machine
  // initial_state: Idling
  // next_state(Idling)            = {Marking}
  // next_state(Marking)           = {Precleaning, Sweeping}
  // next_state(Precleaning)       = {AbortablePreclean, FinalMarking}
  // next_state(AbortablePreclean) = {FinalMarking}
  // next_state(FinalMarking)      = {Sweeping}
  // next_state(Sweeping)          = {Resizing}
  // next_state(Resizing)          = {Resetting}
  // next_state(Resetting)         = {Idling}
  // The numeric values below are chosen so that:
  // . _collectorState <= Idling ==  post-sweep && pre-mark
  // . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
  //                                            precleaning || abortablePreclean
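  //
  // For example, this ordering lets "is a collection cycle in progress" be
  // a single comparison (as used by time_of_last_gc() further below):
  //   bool gc_in_progress = (_collectorState > Idling);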
 public:
  enum CollectorState {
    Resizing          = 0,
    Resetting         = 1,
    Idling            = 2,
    InitialMarking    = 3,
    Marking           = 4,
    Precleaning       = 5,
    AbortablePreclean = 6,
    FinalMarking      = 7,
    Sweeping          = 8
  };
 protected:
  static CollectorState _collectorState;

  // State related to prologue/epilogue invocation for my generations
  bool _between_prologue_and_epilogue;

  // Signaling/State related to coordination between fore- and background GC
  // Note: When the baton has been passed from background GC to foreground GC,
  // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
  static bool _foregroundGCIsActive;   // true iff foreground collector is active or
                                       // wants to go active
  static bool _foregroundGCShouldWait; // true iff background GC is active and has not
                                       // yet passed the baton to the foreground GC

  // Support for CMSScheduleRemark (abortable preclean)
  bool _abort_preclean;
  bool _start_sampling;

  int    _numYields;
  size_t _numDirtyCards;
  size_t _sweep_count;

  // Occupancy used for bootstrapping stats
  double _bootstrap_occupancy;

  // Timer
  elapsedTimer _timer;

  // Timing, allocation and promotion statistics, used for scheduling.
  CMSStats _stats;

  enum CMS_op_type {
    CMS_op_checkpointRootsInitial,
    CMS_op_checkpointRootsFinal
  };

  void do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause);
  bool stop_world_and_do(CMS_op_type op);

  OopTaskQueueSet* task_queues()    { return _task_queues; }
  int*             hash_seed(int i) { return &_hash_seed[i]; }
  YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; }

  // Support for parallelizing Eden rescan in CMS remark phase
  void sample_eden(); // ... sample Eden space top

 private:
  // Support for parallelizing young gen rescan in CMS remark phase
  ParNewGeneration* _young_gen;    // the younger gen

  HeapWord** _top_addr;            // ... Top of Eden
  HeapWord** _end_addr;            // ... End of Eden
  Mutex*     _eden_chunk_lock;
  HeapWord** _eden_chunk_array;    // ... Eden partitioning array
  size_t     _eden_chunk_index;    // ... top (exclusive) of array
  size_t     _eden_chunk_capacity; // ... max entries in array

  // Support for parallelizing survivor space rescan
  HeapWord** _survivor_chunk_array;
  size_t     _survivor_chunk_index;
  size_t     _survivor_chunk_capacity;
  size_t*    _cursor;
  ChunkArray* _survivor_plab_array;

  // Support for marking stack overflow handling
  bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
  bool par_take_from_overflow_list(size_t num,
                                   OopTaskQueue* to_work_q,
                                   int no_of_gc_threads);
  void push_on_overflow_list(oop p);
  void par_push_on_overflow_list(oop p);
  // The following is, obviously, not, in general, "MT-stable"
  bool overflow_list_is_empty() const;

  void preserve_mark_if_necessary(oop p);
  void par_preserve_mark_if_necessary(oop p);
  void preserve_mark_work(oop p, markOop m);
  void restore_preserved_marks_if_any();
  NOT_PRODUCT(bool no_preserved_marks() const;)
  // In support of testing overflow code
  NOT_PRODUCT(int _overflow_counter;)
  NOT_PRODUCT(bool simulate_overflow();)     // Sequential
  NOT_PRODUCT(bool par_simulate_overflow();) // MT version

  // CMS work methods
  void checkpointRootsInitialWork(); // Initial checkpoint work

  // A return value of false indicates failure due to stack overflow
  bool markFromRootsWork(); // Concurrent marking work

 public:   // FIX ME!!! only for testing
  bool do_marking_st(); // Single-threaded marking
  bool do_marking_mt(); // Multi-threaded  marking

 private:

  // Concurrent precleaning work
  size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
                                  ScanMarkedObjectsAgainCarefullyClosure* cl);
  size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
                             ScanMarkedObjectsAgainCarefullyClosure* cl);
  // Does precleaning work, returning a quantity indicative of
  // the amount of "useful work" done.
  size_t preclean_work(bool clean_refs, bool clean_survivors);
  void preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock);
  void abortable_preclean(); // Preclean while looking for possible abort
  void initialize_sequential_subtasks_for_young_gen_rescan(int i);
  // Helper function for above; merge-sorts the per-thread plab samples
  void merge_survivor_plab_arrays(ContiguousSpace* surv, int no_of_gc_threads);
  // Resets (i.e. clears) the per-thread plab sample vectors
  void reset_survivor_plab_arrays();

  // Final (second) checkpoint work
  void checkpointRootsFinalWork();
  // Work routine for parallel version of remark
  void do_remark_parallel();
  // Work routine for non-parallel version of remark
  void do_remark_non_parallel();
  // Reference processing work routine (during second checkpoint)
  void refProcessingWork();

  // Concurrent sweeping work
  void sweepWork(ConcurrentMarkSweepGeneration* gen);

  // (Concurrent) resetting of support data structures
  void reset(bool concurrent);

  // Clear _expansion_cause fields of constituent generations
  void clear_expansion_cause();

  // An auxiliary method used to record the ends of
  // used regions of each generation to limit the extent of sweep
  void save_sweep_limits();

  // A work method used by the foreground collector to do
  // a mark-sweep-compact.
  void do_compaction_work(bool clear_all_soft_refs);

  // Work methods for reporting concurrent mode interruption or failure
  bool is_external_interruption();
  void report_concurrent_mode_interruption();

  // If the background GC is active, acquire control from the background
  // GC and do the collection.
  void acquire_control_and_collect(bool full, bool clear_all_soft_refs);

  // For synchronizing passing of control from background to foreground
  // GC. waitForForegroundGC() is called by the background
  // collector. If it had to wait for a foreground collection,
  // it returns true and the background collection should assume
  // that the collection was finished by the foreground
  // collector.
  bool waitForForegroundGC();

  size_t block_size_using_printezis_bits(HeapWord* addr) const;
  size_t block_size_if_printezis_bits(HeapWord* addr) const;
  HeapWord* next_card_start_after_block(HeapWord* addr) const;

  void setup_cms_unloading_and_verification_state();
 public:
  CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
               CardTableRS*                   ct,
               ConcurrentMarkSweepPolicy*     cp);
  ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }

  ReferenceProcessor* ref_processor() { return _ref_processor; }
  void ref_processor_init();

  Mutex* bitMapLock() const { return _markBitMap.lock(); }
  static CollectorState abstract_state() { return _collectorState; }

  bool should_abort_preclean() const; // Whether preclean should be aborted.
  size_t get_eden_used() const;
  size_t get_eden_capacity() const;

  ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }

  // Locking checks
  NOT_PRODUCT(static bool have_cms_token();)

  bool shouldConcurrentCollect();

  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);
  void collect_in_background(GCCause::Cause cause);

  // In support of ExplicitGCInvokesConcurrent
  static void request_full_gc(unsigned int full_gc_count, GCCause::Cause cause);
  // Should we unload classes in a particular concurrent cycle?
  bool should_unload_classes() const {
    return _should_unload_classes;
  }
  void update_should_unload_classes();

  void direct_allocated(HeapWord* start, size_t size);

  // Object is dead if not marked and current phase is sweeping.
  bool is_dead_obj(oop obj) const;

  // After a promotion (of "start"), do any necessary marking.
  // If "par", then it's being done by a parallel GC thread.
  // The last two args indicate if we need precise marking
  // and if so the size of the object so it can be dirtied
  // in its entirety.
  void promoted(bool par, HeapWord* start,
                bool is_obj_array, size_t obj_size);

  void getFreelistLocks() const;
  void releaseFreelistLocks() const;
  bool haveFreelistLocks() const;

  // Adjust size of underlying generation
  void compute_new_size();

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_epilogue(bool full);

  jlong time_of_last_gc(jlong now) {
    if (_collectorState <= Idling) {
      // gc not in progress
      return _time_of_last_gc;
    } else {
      // collection in progress
      return now;
    }
  }

  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num);
  void sample_eden_chunk();

  CMSBitMap* markBitMap() { return &_markBitMap; }
  void directAllocated(HeapWord* start, size_t size);

  // Main CMS steps and related support
  void checkpointRootsInitial();
  bool markFromRoots();  // a return value of false indicates failure
                         // due to stack overflow
  void preclean();
  void checkpointRootsFinal();
  void sweep();

  // Check that the currently executing thread is the expected
  // one (foreground collector or background collector).
  static void check_correct_thread_executing() PRODUCT_RETURN;

  bool is_cms_reachable(HeapWord* addr);

  // Performance Counter Support
  CollectorCounters* counters() { return _gc_counters; }

  // Timer stuff
  void   startTimer() { assert(!_timer.is_active(), "Error"); _timer.start(); }
  void   stopTimer()  { assert( _timer.is_active(), "Error"); _timer.stop();  }
  void   resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset(); }
  double timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); }

  int  yields()          { return _numYields; }
  void resetYields()     { _numYields = 0;    }
  void incrementYields() { _numYields++;      }
  void resetNumDirtyCards()               { _numDirtyCards = 0; }
  void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; }
  size_t numDirtyCards()                  { return _numDirtyCards; }

  static bool foregroundGCShouldWait()           { return _foregroundGCShouldWait; }
  static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; }
  static bool foregroundGCIsActive()             { return _foregroundGCIsActive; }
  static void set_foregroundGCIsActive(bool v)   { _foregroundGCIsActive = v; }
  size_t sweep_count() const     { return _sweep_count; }
  void   increment_sweep_count() { _sweep_count++; }

  // Timers/stats for gc scheduling and incremental mode pacing.
  CMSStats& stats() { return _stats; }

  // Adaptive size policy
  AdaptiveSizePolicy* size_policy();

  static void print_on_error(outputStream* st);

  // Debugging
  void verify();
  bool verify_after_remark(bool silent = VerifySilently);
  void verify_ok_to_terminate() const PRODUCT_RETURN;
  void verify_work_stacks_empty() const PRODUCT_RETURN;
  void verify_overflow_empty() const PRODUCT_RETURN;

  // Convenience methods in support of debugging
  static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
  HeapWord* block_start(const void* p) const PRODUCT_RETURN0;

  // Accessors
  CMSMarkStack* verification_mark_stack() { return &_markStack; }
  CMSBitMap*    verification_mark_bm()    { return &_verification_mark_bm; }

  // Initialization errors
  bool completed_initialization() { return _completed_initialization; }

  void print_eden_and_survivor_chunk_arrays();
};

class CMSExpansionCause : public AllStatic {
 public:
  enum Cause {
    _no_expansion,
    _satisfy_free_ratio,
    _satisfy_promotion,
    _satisfy_allocation,
    _allocate_par_lab,
    _allocate_par_spooling_space,
    _adaptive_size_policy
  };
  // Return a string describing the cause of the expansion.
  static const char* to_string(CMSExpansionCause::Cause cause);
};

class ConcurrentMarkSweepGeneration: public CardGeneration {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweep;
  friend class CMSCollector;
 protected:
  static CMSCollector*      _collector; // the collector that collects us
  CompactibleFreeListSpace* _cmsSpace;  // underlying space (only one for now)

  // Performance Counters
  GenerationCounters* _gen_counters;
  GSpaceCounters*     _space_counters;

  // Words directly allocated, used by CMSStats.
  size_t _direct_allocated_words;

  // Non-product stat counters
  NOT_PRODUCT(
    size_t _numObjectsPromoted;
    size_t _numWordsPromoted;
    size_t _numObjectsAllocated;
    size_t _numWordsAllocated;
  )

  // Used for sizing decisions
  bool _incremental_collection_failed;
  bool incremental_collection_failed() {
    return _incremental_collection_failed;
  }
  void set_incremental_collection_failed() {
    _incremental_collection_failed = true;
  }
  void clear_incremental_collection_failed() {
    _incremental_collection_failed = false;
  }

  // accessors
  void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v; }
  CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; }

  // Accessing spaces
  CompactibleSpace* space() const { return (CompactibleSpace*)_cmsSpace; }

 private:
  // For parallel young-gen GC support.
  CMSParGCThreadState** _par_gc_thread_states;

  // Reason generation was expanded
  CMSExpansionCause::Cause _expansion_cause;

  // In support of MinChunkSize being larger than min object size
  const double _dilatation_factor;

  // True if a compacting collection was done.
  bool _did_compact;
  bool did_compact() { return _did_compact; }

  // Fraction of current occupancy at which to start a CMS collection which
  // will collect this generation (at least).
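  // Illustrative note (the flag plumbing is not restated in this header):
  // init_initiating_occupancy(io, tr) below is typically fed
  // io = CMSInitiatingOccupancyFraction and tr = CMSTriggerRatio. A
  // non-negative io is used directly as io / 100.0; a negative io derives
  // the fraction as ((100 - MinHeapFreeRatio) + tr * MinHeapFreeRatio / 100) / 100,
  // e.g. 0.92 with the common defaults MinHeapFreeRatio == 40 and tr == 80.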
  double _initiating_occupancy;

 protected:
  // Shrink generation by specified size (returns false if unable to shrink)
  void shrink_free_list_by(size_t bytes);

  // Update statistics for GC
  virtual void update_gc_stats(int level, bool full);

  // Maximum available space in the generation (including uncommitted)
  // space.
  size_t max_available() const;

  // getter and initializer for _initiating_occupancy field.
  double initiating_occupancy() const { return _initiating_occupancy; }
  void   init_initiating_occupancy(intx io, uintx tr);

  void expand_for_gc_cause(size_t bytes, size_t expand_bytes, CMSExpansionCause::Cause cause);

  void assert_correct_size_change_locking();

 public:
  ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                int level, CardTableRS* ct,
                                bool use_adaptive_freelists,
                                FreeBlockDictionary<FreeChunk>::DictionaryChoice);

  // Accessors
  CMSCollector* collector() const { return _collector; }
  static void set_collector(CMSCollector* collector) {
    assert(_collector == NULL, "already set");
    _collector = collector;
  }
  CompactibleFreeListSpace* cmsSpace() const { return _cmsSpace; }

  Mutex* freelistLock() const;

  virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }

  void set_did_compact(bool v) { _did_compact = v; }

  bool refs_discovery_is_atomic() const { return false; }
  bool refs_discovery_is_mt()     const {
    // Note: CMS does MT-discovery during the parallel-remark
    // phases. Use ReferenceProcessorMTMutator to make refs
    // discovery MT-safe during such phases or other parallel
    // discovery phases in the future. This may all go away
    // if/when we decide that refs discovery is sufficiently
    // rare that the cost of the CAS's involved is in the
    // noise. That's a measurement that should be done, and
    // the code simplified if that turns out to be the case.
    return ConcGCThreads > 1;
  }

  // Override
  virtual void ref_processor_init();

  void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }

  // Space enquiries
  double occupancy() const { return ((double)used())/((double)capacity()); }
  size_t contiguous_available() const;
  size_t unsafe_max_alloc_nogc() const;

  // over-rides
  MemRegion used_region_at_save_marks() const;

  // Does a "full" (forced) collection invoked on this generation collect
  // all younger generations as well? Note that the !ScavengeBeforeFullGC
  // conjunct is a hack to allow the collection of the younger gen first
  // if the flag is set.
  virtual bool full_collects_younger_generations() const {
    return !ScavengeBeforeFullGC;
  }

  // Adjust quantities in the generation affected by
  // the compaction.
  void reset_after_compaction();

  // Allocation support
  HeapWord* allocate(size_t size, bool tlab);
  HeapWord* have_lock_and_allocate(size_t size, bool tlab);
  oop       promote(oop obj, size_t obj_size);
  HeapWord* par_allocate(size_t size, bool tlab) {
    return allocate(size, tlab);
  }


  // Used by CMSStats to track direct allocation. The value is sampled and
  // reset after each young gen collection.
  size_t direct_allocated_words() const { return _direct_allocated_words; }
  void reset_direct_allocated_words()   { _direct_allocated_words = 0; }

  // Overrides for parallel promotion.
  virtual oop par_promote(int thread_num,
                          oop obj, markOop m, size_t word_sz);
  virtual void par_promote_alloc_done(int thread_num);
  virtual void par_oop_since_save_marks_iterate_done(int thread_num);

  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const;

  // Inform this (non-young) generation that a promotion failure was
  // encountered during a collection of a younger generation that
  // promotes into this generation.
  virtual void promotion_failure_occurred();

  bool should_collect(bool full, size_t size, bool tlab);
  virtual bool should_concurrent_collect() const;
  virtual bool is_too_full() const;
  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);

  HeapWord* expand_and_allocate(size_t word_size,
                                bool tlab,
                                bool parallel = false);

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_prologue_work(bool full, bool registerClosure,
                        ModUnionClosure* modUnionClosure);
  void gc_epilogue(bool full);
  void gc_epilogue_work(bool full);

  // Time since last GC of this generation
  jlong time_of_last_gc(jlong now) {
    return collector()->time_of_last_gc(now);
  }
  void update_time_of_last_gc(jlong now) {
    collector()->update_time_of_last_gc(now);
  }

  // Allocation failure
  void shrink(size_t bytes);
  HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
  bool expand_and_ensure_spooling_space(PromotionInfo* promo);

  // Iteration support and related enquiries
  void save_marks();
  bool no_allocs_since_save_marks();

  // Iteration support specific to CMS generations
  void save_sweep_limit();

  // More iteration support
  virtual void oop_iterate(ExtendedOopClosure* cl);
  virtual void safe_object_iterate(ObjectClosure* cl);
  virtual void object_iterate(ObjectClosure* cl);

  // Need to declare the full complement of closures, whether we'll
  // override them or not, or get message from the compiler:
  //   oop_since_save_marks_iterate_nv hides virtual function...
  #define CMS_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
    void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DECL)

  // Smart allocation  XXX -- move to CFLSpace?
  void setNearLargestChunk();
  bool isNearLargestChunk(HeapWord* addr);

  // Get the chunk at the end of the space. Delegates to
  // the space.
  FreeChunk* find_chunk_at_end();

  void post_compact();

  // Debugging
  void prepare_for_verify();
  void verify();
  void print_statistics() PRODUCT_RETURN;

  // Performance Counters support
  virtual void update_counters();
  virtual void update_counters(size_t used);
  void initialize_performance_counters();
  CollectorCounters* counters() { return collector()->counters(); }

  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num) {
    // Delegate to collector
    return collector()->get_data_recorder(thr_num);
  }
  void sample_eden_chunk() {
    // Delegate to collector
    return collector()->sample_eden_chunk();
  }

  // Printing
  const char* name() const;
  virtual const char* short_name() const { return "CMS"; }
  void print() const;
  void printOccupancy(const char* s);

  // Resize the generation after a compacting GC. The
  // generation can be treated as a contiguous space
  // after the compaction.
  virtual void compute_new_size();
  // Resize the generation after a non-compacting
  // collection.
  void compute_new_size_free_list();
};

//
// Closures of various sorts used by CMS to accomplish its work
//

// This closure is used to do concurrent marking from the roots
// following the first checkpoint.
class MarkFromRootsClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _bitMap;
  CMSBitMap*     _mut;
  CMSMarkStack*  _markStack;
  bool           _yield;
  int            _skipBits;
  HeapWord*      _finger;
  HeapWord*      _threshold;
  DEBUG_ONLY(bool _verifying;)

 public:
  MarkFromRootsClosure(CMSCollector* collector, MemRegion span,
                       CMSBitMap* bitMap,
                       CMSMarkStack* markStack,
                       bool should_yield, bool verifying = false);
  bool do_bit(size_t offset);
  void reset(HeapWord* addr);
  inline void do_yield_check();

 private:
  void scanOopsInOop(HeapWord* ptr);
  void do_yield_work();
};

// This closure is used to do concurrent multi-threaded
// marking from the roots following the first checkpoint.
// XXX This should really be a subclass of the serial version
// above, but I have not had the time to refactor things cleanly.
class Par_MarkFromRootsClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _whole_span;
  MemRegion      _span;
  CMSBitMap*     _bit_map;
  CMSBitMap*     _mut;
  OopTaskQueue*  _work_queue;
  CMSMarkStack*  _overflow_stack;
  int            _skip_bits;
  HeapWord*      _finger;
  HeapWord*      _threshold;
  CMSConcMarkingTask* _task;
 public:
  Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
                           MemRegion span,
                           CMSBitMap* bit_map,
                           OopTaskQueue* work_queue,
                           CMSMarkStack* overflow_stack);
  bool do_bit(size_t offset);
  inline void do_yield_check();

 private:
  void scan_oops_in_oop(HeapWord* ptr);
  void do_yield_work();
  bool get_work_from_overflow_stack();
};

// The following closures are used to do certain kinds of verification of
// CMS marking.
class PushAndMarkVerifyClosure: public MetadataAwareOopClosure {
  CMSCollector*    _collector;
  MemRegion        _span;
  CMSBitMap*       _verification_bm;
  CMSBitMap*       _cms_bm;
  CMSMarkStack*    _mark_stack;
 protected:
  void do_oop(oop p);
  template <class T> inline void do_oop_work(T *p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    do_oop(obj);
  }
 public:
  PushAndMarkVerifyClosure(CMSCollector* cms_collector,
                           MemRegion span,
                           CMSBitMap* verification_bm,
                           CMSBitMap* cms_bm,
                           CMSMarkStack* mark_stack);
  void do_oop(oop* p);
  void do_oop(narrowOop* p);

  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
};

class MarkFromRootsVerifyClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _verification_bm;
  CMSBitMap*     _cms_bm;
  CMSMarkStack*  _mark_stack;
  HeapWord*      _finger;
  PushAndMarkVerifyClosure _pam_verify_closure;
 public:
  MarkFromRootsVerifyClosure(CMSCollector* collector, MemRegion span,
                             CMSBitMap* verification_bm,
                             CMSBitMap* cms_bm,
                             CMSMarkStack* mark_stack);
  bool do_bit(size_t offset);
  void reset(HeapWord* addr);
};


// This closure is used to check that a certain set of bits is
// "empty" (i.e. the bit vector doesn't have any 1-bits).
class FalseBitMapClosure: public BitMapClosure {
 public:
  bool do_bit(size_t offset) {
    guarantee(false, "Should not have a 1 bit");
    return true;
  }
};

// A version of ObjectClosure with "memory" (see _previous_address below)
class UpwardsObjectClosure: public BoolObjectClosure {
  HeapWord* _previous_address;
 public:
  UpwardsObjectClosure() : _previous_address(NULL) { }
  void set_previous(HeapWord* addr) { _previous_address = addr; }
  HeapWord* previous() { return _previous_address; }
  // A return value of "true" can be used by the caller to decide
  // if this object's end should *NOT* be recorded in
  // _previous_address above.
  virtual bool do_object_bm(oop obj, MemRegion mr) = 0;
};

// This closure is used during the second checkpointing phase
// to rescan the marked objects on the dirty cards in the mod
// union table and the card table proper. It's invoked via
// MarkFromDirtyCardsClosure below. It uses either
// [Par_]MarkRefsIntoAndScanClosure (Par_ in the parallel case)
// declared in genOopClosures.hpp to accomplish some of its work.
// In the parallel case the bitMap is shared, so access to
// it needs to be suitably synchronized for updates by embedded
// closures that update it; however, this closure itself only
// reads the bit_map and because it is idempotent, is immune to
// reading stale values.
class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure {
 #ifdef ASSERT
  CMSCollector*   _collector;
  MemRegion       _span;
  union {
    CMSMarkStack* _mark_stack;
    OopTaskQueue* _work_queue;
  };
 #endif // ASSERT
  bool            _parallel;
  CMSBitMap*      _bit_map;
  union {
    MarkRefsIntoAndScanClosure*     _scan_closure;
    Par_MarkRefsIntoAndScanClosure* _par_scan_closure;
  };

 public:
  ScanMarkedObjectsAgainClosure(CMSCollector* collector,
                                MemRegion span,
                                ReferenceProcessor* rp,
                                CMSBitMap* bit_map,
                                CMSMarkStack* mark_stack,
                                MarkRefsIntoAndScanClosure* cl):
 #ifdef ASSERT
    _collector(collector),
    _span(span),
    _mark_stack(mark_stack),
 #endif // ASSERT
    _parallel(false),
    _bit_map(bit_map),
    _scan_closure(cl) { }

  ScanMarkedObjectsAgainClosure(CMSCollector* collector,
                                MemRegion span,
                                ReferenceProcessor* rp,
                                CMSBitMap* bit_map,
                                OopTaskQueue* work_queue,
                                Par_MarkRefsIntoAndScanClosure* cl):
 #ifdef ASSERT
    _collector(collector),
    _span(span),
    _work_queue(work_queue),
 #endif // ASSERT
    _parallel(true),
    _bit_map(bit_map),
    _par_scan_closure(cl) { }

  bool do_object_b(oop obj) {
    guarantee(false, "Call do_object_b(oop, MemRegion) form instead");
    return false;
  }
  bool do_object_bm(oop p, MemRegion mr);
};

// This closure is used during the second checkpointing phase
// to rescan the marked objects on the dirty cards in the mod
// union table and the card table proper. It invokes
// ScanMarkedObjectsAgainClosure above to accomplish much of its work.
// In the parallel case, the bit map is shared and requires
// synchronized access.
class MarkFromDirtyCardsClosure: public MemRegionClosure {
  CompactibleFreeListSpace*      _space;
  ScanMarkedObjectsAgainClosure  _scan_cl;
  size_t                         _num_dirty_cards;

 public:
  MarkFromDirtyCardsClosure(CMSCollector* collector,
                            MemRegion span,
                            CompactibleFreeListSpace* space,
                            CMSBitMap* bit_map,
                            CMSMarkStack* mark_stack,
                            MarkRefsIntoAndScanClosure* cl):
    _space(space),
    _num_dirty_cards(0),
    _scan_cl(collector, span, collector->ref_processor(), bit_map,
             mark_stack, cl) { }

  MarkFromDirtyCardsClosure(CMSCollector* collector,
                            MemRegion span,
                            CompactibleFreeListSpace* space,
                            CMSBitMap* bit_map,
                            OopTaskQueue* work_queue,
                            Par_MarkRefsIntoAndScanClosure* cl):
    _space(space),
    _num_dirty_cards(0),
    _scan_cl(collector, span, collector->ref_processor(), bit_map,
             work_queue, cl) { }

  void do_MemRegion(MemRegion mr);
  void set_space(CompactibleFreeListSpace* space) { _space = space; }
  size_t num_dirty_cards() { return _num_dirty_cards; }
};

// This closure is used in the non-product build to check
// that there are no MemRegions with a certain property.
class FalseMemRegionClosure: public MemRegionClosure {
  void do_MemRegion(MemRegion mr) {
    guarantee(!mr.is_empty(), "Shouldn't be empty");
    guarantee(false, "Should never be here");
  }
};

// This closure is used during the precleaning phase
// to "carefully" rescan marked objects on dirty cards.
// It uses MarkRefsIntoAndScanClosure declared in genOopClosures.hpp
// to accomplish some of its work.
class ScanMarkedObjectsAgainCarefullyClosure: public ObjectClosureCareful {
  CMSCollector*               _collector;
  MemRegion                   _span;
  bool                        _yield;
  Mutex*                      _freelistLock;
  CMSBitMap*                  _bitMap;
  CMSMarkStack*               _markStack;
  MarkRefsIntoAndScanClosure* _scanningClosure;

 public:
  ScanMarkedObjectsAgainCarefullyClosure(CMSCollector* collector,
                                         MemRegion     span,
                                         CMSBitMap* bitMap,
                                         CMSMarkStack* markStack,
                                         MarkRefsIntoAndScanClosure* cl,
                                         bool should_yield):
    _collector(collector),
    _span(span),
    _yield(should_yield),
    _bitMap(bitMap),
    _markStack(markStack),
    _scanningClosure(cl) {
  }

  void do_object(oop p) {
    guarantee(false, "call do_object_careful instead");
  }

  size_t do_object_careful(oop p) {
    guarantee(false, "Unexpected caller");
    return 0;
  }

  size_t do_object_careful_m(oop p, MemRegion mr);

  void setFreelistLock(Mutex* m) {
    _freelistLock = m;
    _scanningClosure->set_freelistLock(m);
  }

 private:
  inline bool do_yield_check();

  void do_yield_work();
};

class SurvivorSpacePrecleanClosure: public ObjectClosureCareful {
  CMSCollector*       _collector;
  MemRegion           _span;
  bool                _yield;
  CMSBitMap*          _bit_map;
  CMSMarkStack*       _mark_stack;
  PushAndMarkClosure* _scanning_closure;
  unsigned int        _before_count;

 public:
  SurvivorSpacePrecleanClosure(CMSCollector* collector,
                               MemRegion     span,
                               CMSBitMap*    bit_map,
                               CMSMarkStack* mark_stack,
                               PushAndMarkClosure* cl,
                               unsigned int  before_count,
                               bool should_yield):
    _collector(collector),
    _span(span),
    _yield(should_yield),
    _bit_map(bit_map),
    _mark_stack(mark_stack),
    _scanning_closure(cl),
    _before_count(before_count)
  { }

  void do_object(oop p) {
    guarantee(false, "call do_object_careful instead");
  }

  size_t do_object_careful(oop p);

  size_t do_object_careful_m(oop p, MemRegion mr) {
    guarantee(false, "Unexpected caller");
    return 0;
  }

 private:
  inline void do_yield_check();
  void do_yield_work();
};

// This closure is used to accomplish the sweeping work
// after the second checkpoint but before the concurrent reset
// phase.
//
// Terminology
//   left hand chunk (LHC) - block of one or more chunks currently being
//     coalesced. The LHC is available for coalescing with a new chunk.
//   right hand chunk (RHC) - block that is currently being swept that is
//     free or garbage that can be coalesced with the LHC.
// _inFreeRange is true if there is currently a LHC
// _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk.
// _freeRangeInFreeLists is true if the LHC is in the free lists.
// _freeFinger is the address of the current LHC
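//
// Illustrative walk-through (assumed layout, for exposition only): sweeping
// left to right over
//     [ free | garbage | live ]
// the free chunk starts a range (_inFreeRange set, _freeFinger at its start);
// the adjacent garbage chunk becomes the RHC and is coalesced into the LHC
// (_lastFreeRangeCoalesced set); the live object then ends the range, which
// is returned to the free lists via flush_cur_free_chunk().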
// This closure is used to accomplish the sweeping work
// after the second checkpoint but before the concurrent reset
// phase.
//
// Terminology
//   left hand chunk (LHC)  - block of one or more chunks currently being
//     coalesced. The LHC is available for coalescing with a new chunk.
//   right hand chunk (RHC) - block that is currently being swept that is
//     free or garbage that can be coalesced with the LHC.
// _inFreeRange is true if there is currently a LHC.
// _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk.
// _freeRangeInFreeLists is true if the LHC is in the free lists.
// _freeFinger is the address of the current LHC.
class SweepClosure: public BlkClosureCareful {
  CMSCollector*                  _collector;  // collector doing the work
  ConcurrentMarkSweepGeneration* _g;          // Generation being swept
  CompactibleFreeListSpace*      _sp;         // Space being swept
  HeapWord*                      _limit;      // the address at or above which the
                                              // sweep should stop, because we do not
                                              // expect blocks that are newly garbage
                                              // (and hence eligible for sweeping) at
                                              // or past that address
  Mutex*                         _freelistLock; // Free list lock (in space)
  CMSBitMap*                     _bitMap;     // Marking bit map (in generation)
  bool                           _inFreeRange; // Indicates if we are in the
                                              // midst of a free run
  bool                           _freeRangeInFreeLists;
                                              // Often, we have just found
                                              // a free chunk and started
                                              // a new free range; we do not
                                              // eagerly remove this chunk from
                                              // the free lists unless there is
                                              // a possibility of coalescing.
                                              // When true, this flag indicates
                                              // that the _freeFinger below
                                              // points to a potentially free chunk
                                              // that may still be in the free lists
  bool                           _lastFreeRangeCoalesced;
                                              // free range contains chunks
                                              // coalesced
  bool                           _yield;      // Whether sweeping should be
                                              // done with yields. For instance
                                              // when done by the foreground
                                              // collector we shouldn't yield.
  HeapWord*                      _freeFinger; // When _inFreeRange is set, the
                                              // pointer to the "left hand chunk"
  size_t                         _freeRangeSize;
                                              // When _inFreeRange is set, this
                                              // indicates the accumulated size
                                              // of the "left hand chunk"
  NOT_PRODUCT(
    size_t                       _numObjectsFreed;
    size_t                       _numWordsFreed;
    size_t                       _numObjectsLive;
    size_t                       _numWordsLive;
    size_t                       _numObjectsAlreadyFree;
    size_t                       _numWordsAlreadyFree;
    FreeChunk*                   _last_fc;
  )
 private:
  // Code that is common to a free chunk or garbage when
  // encountered during sweeping.
  void do_post_free_or_garbage_chunk(FreeChunk* fc, size_t chunkSize);
  // Process a free chunk during sweeping.
  void do_already_free_chunk(FreeChunk* fc);
  // Work method called when processing an already free or a
  // freshly garbage chunk to do a lookahead and possibly a
  // preemptive flush if crossing over _limit.
  void lookahead_and_flush(FreeChunk* fc, size_t chunkSize);
  // Process a garbage chunk during sweeping.
  size_t do_garbage_chunk(FreeChunk* fc);
  // Process a live chunk during sweeping.
  size_t do_live_chunk(FreeChunk* fc);

  // Accessors.
  HeapWord* freeFinger() const            { return _freeFinger; }
  void set_freeFinger(HeapWord* v)        { _freeFinger = v; }
  bool inFreeRange() const                { return _inFreeRange; }
  void set_inFreeRange(bool v)            { _inFreeRange = v; }
  bool lastFreeRangeCoalesced() const     { return _lastFreeRangeCoalesced; }
  void set_lastFreeRangeCoalesced(bool v) { _lastFreeRangeCoalesced = v; }
  bool freeRangeInFreeLists() const       { return _freeRangeInFreeLists; }
  void set_freeRangeInFreeLists(bool v)   { _freeRangeInFreeLists = v; }

  // Initialize a free range.
  void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists);
  // Return this chunk to the free lists.
  void flush_cur_free_chunk(HeapWord* chunk, size_t size);

  // Check if we should yield and do so when necessary.
  inline void do_yield_check(HeapWord* addr);

  // Yield.
  void do_yield_work(HeapWord* addr);

  // Debugging/Printing
  void print_free_block_coalesced(FreeChunk* fc) const;

 public:
  SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
               CMSBitMap* bitMap, bool should_yield);
  ~SweepClosure() PRODUCT_RETURN;

  size_t do_blk_careful(HeapWord* addr);
  void print() const { print_on(tty); }
  void print_on(outputStream* st) const;
};
// Closures related to weak references processing

// During CMS' weak reference processing, this is a
// work-routine/closure used to complete transitive
// marking of objects as live after a certain point
// at which an initial set has been completely accumulated.
// This closure is currently used both during the final
// remark stop-world phase, as well as during the concurrent
// precleaning of the discovered reference lists.
class CMSDrainMarkingStackClosure: public VoidClosure {
  CMSCollector*        _collector;
  MemRegion            _span;
  CMSMarkStack*        _mark_stack;
  CMSBitMap*           _bit_map;
  CMSKeepAliveClosure* _keep_alive;
  bool                 _concurrent_precleaning;
 public:
  CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span,
                              CMSBitMap* bit_map, CMSMarkStack* mark_stack,
                              CMSKeepAliveClosure* keep_alive,
                              bool cpc):
    _collector(collector),
    _span(span),
    _mark_stack(mark_stack),
    _bit_map(bit_map),
    _keep_alive(keep_alive),
    _concurrent_precleaning(cpc) {
    assert(_concurrent_precleaning == _keep_alive->concurrent_precleaning(),
           "Mismatch");
  }

  void do_void();
};

// A parallel version of CMSDrainMarkingStackClosure above.
class CMSParDrainMarkingStackClosure: public VoidClosure {
  CMSCollector*                  _collector;
  MemRegion                      _span;
  OopTaskQueue*                  _work_queue;
  CMSBitMap*                     _bit_map;
  CMSInnerParMarkAndPushClosure  _mark_and_push;

 public:
  CMSParDrainMarkingStackClosure(CMSCollector* collector,
                                 MemRegion span, CMSBitMap* bit_map,
                                 OopTaskQueue* work_queue):
    _collector(collector),
    _span(span),
    _work_queue(work_queue),
    _bit_map(bit_map),
    _mark_and_push(collector, span, bit_map, work_queue) { }

  void trim_queue(uint max);
  void do_void();
};

// Allow yielding or short-circuiting of reference list
// precleaning work.
class CMSPrecleanRefsYieldClosure: public YieldClosure {
  CMSCollector* _collector;
  void do_yield_work();
 public:
  CMSPrecleanRefsYieldClosure(CMSCollector* collector):
    _collector(collector) {}
  virtual bool should_return();
};


// Convenience class that locks the free list locks of a given CMS
// collector for the duration of a scope (RAII).
class FreelistLocker: public StackObj {
 private:
  CMSCollector* _collector;
 public:
  FreelistLocker(CMSCollector* collector):
    _collector(collector) {
    _collector->getFreelistLocks();
  }

  ~FreelistLocker() {
    _collector->releaseFreelistLocks();
  }
};
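
// FreelistLocker follows the standard RAII pattern: the locks are taken
// in the constructor and released in the destructor, so every exit path
// from the scope -- normal return or otherwise -- releases them. A
// hypothetical usage sketch (the surrounding function is invented; the
// getFreelistLocks()/releaseFreelistLocks() calls are the ones this
// class actually makes):
#if 0 // Illustrative sketch only -- not compiled.
  void recalculate_statistics(CMSCollector* collector) {
    FreelistLocker fl(collector);   // getFreelistLocks() runs here
    // ... inspect or update the free lists safely ...
  }                                 // releaseFreelistLocks() on scope exit
#endif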
// Mark all dead objects in a given space.
class MarkDeadObjectsClosure: public BlkClosure {
  const CMSCollector*             _collector;
  const CompactibleFreeListSpace* _sp;
  CMSBitMap*                      _live_bit_map;
  CMSBitMap*                      _dead_bit_map;
 public:
  MarkDeadObjectsClosure(const CMSCollector* collector,
                         const CompactibleFreeListSpace* sp,
                         CMSBitMap* live_bit_map,
                         CMSBitMap* dead_bit_map) :
    _collector(collector),
    _sp(sp),
    _live_bit_map(live_bit_map),
    _dead_bit_map(dead_bit_map) {}
  size_t do_blk(HeapWord* addr);
};

class TraceCMSMemoryManagerStats : public TraceMemoryManagerStats {
 public:
  TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause);
};


#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP