1 /* 2 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_VM_GC_CMS_CONCURRENTMARKSWEEPGENERATION_HPP 26 #define SHARE_VM_GC_CMS_CONCURRENTMARKSWEEPGENERATION_HPP 27 28 #include "gc/cms/cmsOopClosures.hpp" 29 #include "gc/cms/gSpaceCounters.hpp" 30 #include "gc/cms/yieldingWorkgroup.hpp" 31 #include "gc/shared/cardGeneration.hpp" 32 #include "gc/shared/gcHeapSummary.hpp" 33 #include "gc/shared/gcStats.hpp" 34 #include "gc/shared/gcWhen.hpp" 35 #include "gc/shared/generationCounters.hpp" 36 #include "gc/shared/space.hpp" 37 #include "gc/shared/taskqueue.hpp" 38 #include "logging/log.hpp" 39 #include "memory/freeBlockDictionary.hpp" 40 #include "memory/iterator.hpp" 41 #include "memory/virtualspace.hpp" 42 #include "runtime/mutexLocker.hpp" 43 #include "services/memoryService.hpp" 44 #include "utilities/bitMap.hpp" 45 #include "utilities/stack.hpp" 46 47 // ConcurrentMarkSweepGeneration is in support of a concurrent 48 // mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Schenker 49 // style. We assume, for now, that this generation is always the 50 // seniormost generation and for simplicity 51 // in the first implementation, that this generation is a single compactible 52 // space. Neither of these restrictions appears essential, and will be 53 // relaxed in the future when more time is available to implement the 54 // greater generality (and there's a need for it). 55 // 56 // Concurrent mode failures are currently handled by 57 // means of a sliding mark-compact. 58 59 class AdaptiveSizePolicy; 60 class CMSCollector; 61 class CMSConcMarkingTask; 62 class CMSGCAdaptivePolicyCounters; 63 class CMSTracer; 64 class ConcurrentGCTimer; 65 class ConcurrentMarkSweepGeneration; 66 class ConcurrentMarkSweepPolicy; 67 class ConcurrentMarkSweepThread; 68 class CompactibleFreeListSpace; 69 class FreeChunk; 70 class ParNewGeneration; 71 class PromotionInfo; 72 class ScanMarkedObjectsAgainCarefullyClosure; 73 class TenuredGeneration; 74 class SerialOldTracer; 75 76 // A generic CMS bit map. It's the basis for both the CMS marking bit map 77 // as well as for the mod union table (in each case only a subset of the 78 // methods are used). This is essentially a wrapper around the BitMap class, 79 // with one bit per (1<<_shifter) HeapWords. (i.e. for the marking bit map, 80 // we have _shifter == 0. and for the mod union table we have 81 // shifter == CardTableModRefBS::card_shift - LogHeapWordSize.) 82 // XXX 64-bit issues in BitMap? 83 class CMSBitMap VALUE_OBJ_CLASS_SPEC { 84 friend class VMStructs; 85 86 HeapWord* _bmStartWord; // base address of range covered by map 87 size_t _bmWordSize; // map size (in #HeapWords covered) 88 const int _shifter; // shifts to convert HeapWord to bit position 89 VirtualSpace _virtual_space; // underlying the bit map 90 BitMapView _bm; // the bit map itself 91 Mutex* const _lock; // mutex protecting _bm; 92 93 public: 94 // constructor 95 CMSBitMap(int shifter, int mutex_rank, const char* mutex_name); 96 97 // allocates the actual storage for the map 98 bool allocate(MemRegion mr); 99 // field getter 100 Mutex* lock() const { return _lock; } 101 // locking verifier convenience function 102 void assert_locked() const PRODUCT_RETURN; 103 104 // inquiries 105 HeapWord* startWord() const { return _bmStartWord; } 106 size_t sizeInWords() const { return _bmWordSize; } 107 size_t sizeInBits() const { return _bm.size(); } 108 // the following is one past the last word in space 109 HeapWord* endWord() const { return _bmStartWord + _bmWordSize; } 110 111 // reading marks 112 bool isMarked(HeapWord* addr) const; 113 bool par_isMarked(HeapWord* addr) const; // do not lock checks 114 bool isUnmarked(HeapWord* addr) const; 115 bool isAllClear() const; 116 117 // writing marks 118 void mark(HeapWord* addr); 119 // For marking by parallel GC threads; 120 // returns true if we did, false if another thread did 121 bool par_mark(HeapWord* addr); 122 123 void mark_range(MemRegion mr); 124 void par_mark_range(MemRegion mr); 125 void mark_large_range(MemRegion mr); 126 void par_mark_large_range(MemRegion mr); 127 void par_clear(HeapWord* addr); // For unmarking by parallel GC threads. 128 void clear_range(MemRegion mr); 129 void par_clear_range(MemRegion mr); 130 void clear_large_range(MemRegion mr); 131 void par_clear_large_range(MemRegion mr); 132 void clear_all(); 133 void clear_all_incrementally(); // Not yet implemented!! 134 135 NOT_PRODUCT( 136 // checks the memory region for validity 137 void region_invariant(MemRegion mr); 138 ) 139 140 // iteration 141 void iterate(BitMapClosure* cl) { 142 _bm.iterate(cl); 143 } 144 void iterate(BitMapClosure* cl, HeapWord* left, HeapWord* right); 145 void dirty_range_iterate_clear(MemRegionClosure* cl); 146 void dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl); 147 148 // auxiliary support for iteration 149 HeapWord* getNextMarkedWordAddress(HeapWord* addr) const; 150 HeapWord* getNextMarkedWordAddress(HeapWord* start_addr, 151 HeapWord* end_addr) const; 152 HeapWord* getNextUnmarkedWordAddress(HeapWord* addr) const; 153 HeapWord* getNextUnmarkedWordAddress(HeapWord* start_addr, 154 HeapWord* end_addr) const; 155 MemRegion getAndClearMarkedRegion(HeapWord* addr); 156 MemRegion getAndClearMarkedRegion(HeapWord* start_addr, 157 HeapWord* end_addr); 158 159 // conversion utilities 160 HeapWord* offsetToHeapWord(size_t offset) const; 161 size_t heapWordToOffset(HeapWord* addr) const; 162 size_t heapWordDiffToOffsetDiff(size_t diff) const; 163 164 void print_on_error(outputStream* st, const char* prefix) const; 165 166 // debugging 167 // is this address range covered by the bit-map? 168 NOT_PRODUCT( 169 bool covers(MemRegion mr) const; 170 bool covers(HeapWord* start, size_t size = 0) const; 171 ) 172 void verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) PRODUCT_RETURN; 173 }; 174 175 // Represents a marking stack used by the CMS collector. 176 // Ideally this should be GrowableArray<> just like MSC's marking stack(s). 177 class CMSMarkStack: public CHeapObj<mtGC> { 178 friend class CMSCollector; // To get at expansion stats further below. 179 180 VirtualSpace _virtual_space; // Space for the stack 181 oop* _base; // Bottom of stack 182 size_t _index; // One more than last occupied index 183 size_t _capacity; // Max #elements 184 Mutex _par_lock; // An advisory lock used in case of parallel access 185 NOT_PRODUCT(size_t _max_depth;) // Max depth plumbed during run 186 187 protected: 188 size_t _hit_limit; // We hit max stack size limit 189 size_t _failed_double; // We failed expansion before hitting limit 190 191 public: 192 CMSMarkStack(): 193 _par_lock(Mutex::event, "CMSMarkStack._par_lock", true, 194 Monitor::_safepoint_check_never), 195 _hit_limit(0), 196 _failed_double(0) {} 197 198 bool allocate(size_t size); 199 200 size_t capacity() const { return _capacity; } 201 202 oop pop() { 203 if (!isEmpty()) { 204 return _base[--_index] ; 205 } 206 return NULL; 207 } 208 209 bool push(oop ptr) { 210 if (isFull()) { 211 return false; 212 } else { 213 _base[_index++] = ptr; 214 NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index)); 215 return true; 216 } 217 } 218 219 bool isEmpty() const { return _index == 0; } 220 bool isFull() const { 221 assert(_index <= _capacity, "buffer overflow"); 222 return _index == _capacity; 223 } 224 225 size_t length() { return _index; } 226 227 // "Parallel versions" of some of the above 228 oop par_pop() { 229 // lock and pop 230 MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag); 231 return pop(); 232 } 233 234 bool par_push(oop ptr) { 235 // lock and push 236 MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag); 237 return push(ptr); 238 } 239 240 // Forcibly reset the stack, losing all of its contents. 241 void reset() { 242 _index = 0; 243 } 244 245 // Expand the stack, typically in response to an overflow condition. 246 void expand(); 247 248 // Compute the least valued stack element. 249 oop least_value(HeapWord* low) { 250 oop least = (oop)low; 251 for (size_t i = 0; i < _index; i++) { 252 least = MIN2(least, _base[i]); 253 } 254 return least; 255 } 256 257 // Exposed here to allow stack expansion in || case. 258 Mutex* par_lock() { return &_par_lock; } 259 }; 260 261 class CardTableRS; 262 class CMSParGCThreadState; 263 264 class ModUnionClosure: public MemRegionClosure { 265 protected: 266 CMSBitMap* _t; 267 public: 268 ModUnionClosure(CMSBitMap* t): _t(t) { } 269 void do_MemRegion(MemRegion mr); 270 }; 271 272 class ModUnionClosurePar: public ModUnionClosure { 273 public: 274 ModUnionClosurePar(CMSBitMap* t): ModUnionClosure(t) { } 275 void do_MemRegion(MemRegion mr); 276 }; 277 278 // Survivor Chunk Array in support of parallelization of 279 // Survivor Space rescan. 280 class ChunkArray: public CHeapObj<mtGC> { 281 size_t _index; 282 size_t _capacity; 283 size_t _overflows; 284 HeapWord** _array; // storage for array 285 286 public: 287 ChunkArray() : _index(0), _capacity(0), _overflows(0), _array(NULL) {} 288 ChunkArray(HeapWord** a, size_t c): 289 _index(0), _capacity(c), _overflows(0), _array(a) {} 290 291 HeapWord** array() { return _array; } 292 void set_array(HeapWord** a) { _array = a; } 293 294 size_t capacity() { return _capacity; } 295 void set_capacity(size_t c) { _capacity = c; } 296 297 size_t end() { 298 assert(_index <= capacity(), 299 "_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT "): out of bounds", 300 _index, _capacity); 301 return _index; 302 } // exclusive 303 304 HeapWord* nth(size_t n) { 305 assert(n < end(), "Out of bounds access"); 306 return _array[n]; 307 } 308 309 void reset() { 310 _index = 0; 311 if (_overflows > 0) { 312 log_trace(gc)("CMS: ChunkArray[" SIZE_FORMAT "] overflowed " SIZE_FORMAT " times", _capacity, _overflows); 313 } 314 _overflows = 0; 315 } 316 317 void record_sample(HeapWord* p, size_t sz) { 318 // For now we do not do anything with the size 319 if (_index < _capacity) { 320 _array[_index++] = p; 321 } else { 322 ++_overflows; 323 assert(_index == _capacity, 324 "_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT 325 "): out of bounds at overflow#" SIZE_FORMAT, 326 _index, _capacity, _overflows); 327 } 328 } 329 }; 330 331 // 332 // Timing, allocation and promotion statistics for gc scheduling and incremental 333 // mode pacing. Most statistics are exponential averages. 334 // 335 class CMSStats VALUE_OBJ_CLASS_SPEC { 336 private: 337 ConcurrentMarkSweepGeneration* const _cms_gen; // The cms (old) gen. 338 339 // The following are exponential averages with factor alpha: 340 // avg = (100 - alpha) * avg + alpha * cur_sample 341 // 342 // The durations measure: end_time[n] - start_time[n] 343 // The periods measure: start_time[n] - start_time[n-1] 344 // 345 // The cms period and duration include only concurrent collections; time spent 346 // in foreground cms collections due to System.gc() or because of a failure to 347 // keep up are not included. 348 // 349 // There are 3 alphas to "bootstrap" the statistics. The _saved_alpha is the 350 // real value, but is used only after the first period. A value of 100 is 351 // used for the first sample so it gets the entire weight. 352 unsigned int _saved_alpha; // 0-100 353 unsigned int _gc0_alpha; 354 unsigned int _cms_alpha; 355 356 double _gc0_duration; 357 double _gc0_period; 358 size_t _gc0_promoted; // bytes promoted per gc0 359 double _cms_duration; 360 double _cms_duration_pre_sweep; // time from initiation to start of sweep 361 double _cms_period; 362 size_t _cms_allocated; // bytes of direct allocation per gc0 period 363 364 // Timers. 365 elapsedTimer _cms_timer; 366 TimeStamp _gc0_begin_time; 367 TimeStamp _cms_begin_time; 368 TimeStamp _cms_end_time; 369 370 // Snapshots of the amount used in the CMS generation. 371 size_t _cms_used_at_gc0_begin; 372 size_t _cms_used_at_gc0_end; 373 size_t _cms_used_at_cms_begin; 374 375 // Used to prevent the duty cycle from being reduced in the middle of a cms 376 // cycle. 377 bool _allow_duty_cycle_reduction; 378 379 enum { 380 _GC0_VALID = 0x1, 381 _CMS_VALID = 0x2, 382 _ALL_VALID = _GC0_VALID | _CMS_VALID 383 }; 384 385 unsigned int _valid_bits; 386 387 protected: 388 // In support of adjusting of cms trigger ratios based on history 389 // of concurrent mode failure. 390 double cms_free_adjustment_factor(size_t free) const; 391 void adjust_cms_free_adjustment_factor(bool fail, size_t free); 392 393 public: 394 CMSStats(ConcurrentMarkSweepGeneration* cms_gen, 395 unsigned int alpha = CMSExpAvgFactor); 396 397 // Whether or not the statistics contain valid data; higher level statistics 398 // cannot be called until this returns true (they require at least one young 399 // gen and one cms cycle to have completed). 400 bool valid() const; 401 402 // Record statistics. 403 void record_gc0_begin(); 404 void record_gc0_end(size_t cms_gen_bytes_used); 405 void record_cms_begin(); 406 void record_cms_end(); 407 408 // Allow management of the cms timer, which must be stopped/started around 409 // yield points. 410 elapsedTimer& cms_timer() { return _cms_timer; } 411 void start_cms_timer() { _cms_timer.start(); } 412 void stop_cms_timer() { _cms_timer.stop(); } 413 414 // Basic statistics; units are seconds or bytes. 415 double gc0_period() const { return _gc0_period; } 416 double gc0_duration() const { return _gc0_duration; } 417 size_t gc0_promoted() const { return _gc0_promoted; } 418 double cms_period() const { return _cms_period; } 419 double cms_duration() const { return _cms_duration; } 420 size_t cms_allocated() const { return _cms_allocated; } 421 422 size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end;} 423 424 // Seconds since the last background cms cycle began or ended. 425 double cms_time_since_begin() const; 426 double cms_time_since_end() const; 427 428 // Higher level statistics--caller must check that valid() returns true before 429 // calling. 430 431 // Returns bytes promoted per second of wall clock time. 432 double promotion_rate() const; 433 434 // Returns bytes directly allocated per second of wall clock time. 435 double cms_allocation_rate() const; 436 437 // Rate at which space in the cms generation is being consumed (sum of the 438 // above two). 439 double cms_consumption_rate() const; 440 441 // Returns an estimate of the number of seconds until the cms generation will 442 // fill up, assuming no collection work is done. 443 double time_until_cms_gen_full() const; 444 445 // Returns an estimate of the number of seconds remaining until 446 // the cms generation collection should start. 447 double time_until_cms_start() const; 448 449 // End of higher level statistics. 450 451 // Debugging. 452 void print_on(outputStream* st) const PRODUCT_RETURN; 453 void print() const { print_on(tty); } 454 }; 455 456 // A closure related to weak references processing which 457 // we embed in the CMSCollector, since we need to pass 458 // it to the reference processor for secondary filtering 459 // of references based on reachability of referent; 460 // see role of _is_alive_non_header closure in the 461 // ReferenceProcessor class. 462 // For objects in the CMS generation, this closure checks 463 // if the object is "live" (reachable). Used in weak 464 // reference processing. 465 class CMSIsAliveClosure: public BoolObjectClosure { 466 const MemRegion _span; 467 const CMSBitMap* _bit_map; 468 469 friend class CMSCollector; 470 public: 471 CMSIsAliveClosure(MemRegion span, 472 CMSBitMap* bit_map): 473 _span(span), 474 _bit_map(bit_map) { 475 assert(!span.is_empty(), "Empty span could spell trouble"); 476 } 477 478 bool do_object_b(oop obj); 479 }; 480 481 482 // Implements AbstractRefProcTaskExecutor for CMS. 483 class CMSRefProcTaskExecutor: public AbstractRefProcTaskExecutor { 484 public: 485 486 CMSRefProcTaskExecutor(CMSCollector& collector) 487 : _collector(collector) 488 { } 489 490 // Executes a task using worker threads. 491 virtual void execute(ProcessTask& task); 492 virtual void execute(EnqueueTask& task); 493 private: 494 CMSCollector& _collector; 495 }; 496 497 498 class CMSCollector: public CHeapObj<mtGC> { 499 friend class VMStructs; 500 friend class ConcurrentMarkSweepThread; 501 friend class ConcurrentMarkSweepGeneration; 502 friend class CompactibleFreeListSpace; 503 friend class CMSParMarkTask; 504 friend class CMSParInitialMarkTask; 505 friend class CMSParRemarkTask; 506 friend class CMSConcMarkingTask; 507 friend class CMSRefProcTaskProxy; 508 friend class CMSRefProcTaskExecutor; 509 friend class ScanMarkedObjectsAgainCarefullyClosure; // for sampling eden 510 friend class SurvivorSpacePrecleanClosure; // --- ditto ------- 511 friend class PushOrMarkClosure; // to access _restart_addr 512 friend class ParPushOrMarkClosure; // to access _restart_addr 513 friend class MarkFromRootsClosure; // -- ditto -- 514 // ... and for clearing cards 515 friend class ParMarkFromRootsClosure; // to access _restart_addr 516 // ... and for clearing cards 517 friend class ParConcMarkingClosure; // to access _restart_addr etc. 518 friend class MarkFromRootsVerifyClosure; // to access _restart_addr 519 friend class PushAndMarkVerifyClosure; // -- ditto -- 520 friend class MarkRefsIntoAndScanClosure; // to access _overflow_list 521 friend class PushAndMarkClosure; // -- ditto -- 522 friend class ParPushAndMarkClosure; // -- ditto -- 523 friend class CMSKeepAliveClosure; // -- ditto -- 524 friend class CMSDrainMarkingStackClosure; // -- ditto -- 525 friend class CMSInnerParMarkAndPushClosure; // -- ditto -- 526 NOT_PRODUCT(friend class ScanMarkedObjectsAgainClosure;) // assertion on _overflow_list 527 friend class ReleaseForegroundGC; // to access _foregroundGCShouldWait 528 friend class VM_CMS_Operation; 529 friend class VM_CMS_Initial_Mark; 530 friend class VM_CMS_Final_Remark; 531 friend class TraceCMSMemoryManagerStats; 532 533 private: 534 jlong _time_of_last_gc; 535 void update_time_of_last_gc(jlong now) { 536 _time_of_last_gc = now; 537 } 538 539 OopTaskQueueSet* _task_queues; 540 541 // Overflow list of grey objects, threaded through mark-word 542 // Manipulated with CAS in the parallel/multi-threaded case. 543 oopDesc* volatile _overflow_list; 544 // The following array-pair keeps track of mark words 545 // displaced for accommodating overflow list above. 546 // This code will likely be revisited under RFE#4922830. 547 Stack<oop, mtGC> _preserved_oop_stack; 548 Stack<markOop, mtGC> _preserved_mark_stack; 549 550 int* _hash_seed; 551 552 // In support of multi-threaded concurrent phases 553 YieldingFlexibleWorkGang* _conc_workers; 554 555 // Performance Counters 556 CollectorCounters* _gc_counters; 557 CollectorCounters* _cgc_counters; 558 559 // Initialization Errors 560 bool _completed_initialization; 561 562 // In support of ExplicitGCInvokesConcurrent 563 static bool _full_gc_requested; 564 static GCCause::Cause _full_gc_cause; 565 unsigned int _collection_count_start; 566 567 // Should we unload classes this concurrent cycle? 568 bool _should_unload_classes; 569 unsigned int _concurrent_cycles_since_last_unload; 570 unsigned int concurrent_cycles_since_last_unload() const { 571 return _concurrent_cycles_since_last_unload; 572 } 573 // Did we (allow) unload classes in the previous concurrent cycle? 574 bool unloaded_classes_last_cycle() const { 575 return concurrent_cycles_since_last_unload() == 0; 576 } 577 // Root scanning options for perm gen 578 int _roots_scanning_options; 579 int roots_scanning_options() const { return _roots_scanning_options; } 580 void add_root_scanning_option(int o) { _roots_scanning_options |= o; } 581 void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o; } 582 583 // Verification support 584 CMSBitMap _verification_mark_bm; 585 void verify_after_remark_work_1(); 586 void verify_after_remark_work_2(); 587 588 // True if any verification flag is on. 589 bool _verifying; 590 bool verifying() const { return _verifying; } 591 void set_verifying(bool v) { _verifying = v; } 592 593 // Collector policy 594 ConcurrentMarkSweepPolicy* _collector_policy; 595 ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; } 596 597 void set_did_compact(bool v); 598 599 // XXX Move these to CMSStats ??? FIX ME !!! 600 elapsedTimer _inter_sweep_timer; // Time between sweeps 601 elapsedTimer _intra_sweep_timer; // Time _in_ sweeps 602 // Padded decaying average estimates of the above 603 AdaptivePaddedAverage _inter_sweep_estimate; 604 AdaptivePaddedAverage _intra_sweep_estimate; 605 606 CMSTracer* _gc_tracer_cm; 607 ConcurrentGCTimer* _gc_timer_cm; 608 609 bool _cms_start_registered; 610 611 GCHeapSummary _last_heap_summary; 612 MetaspaceSummary _last_metaspace_summary; 613 614 void register_gc_start(GCCause::Cause cause); 615 void register_gc_end(); 616 void save_heap_summary(); 617 void report_heap_summary(GCWhen::Type when); 618 619 protected: 620 ConcurrentMarkSweepGeneration* _cmsGen; // Old gen (CMS) 621 MemRegion _span; // Span covering above two 622 CardTableRS* _ct; // Card table 623 624 // CMS marking support structures 625 CMSBitMap _markBitMap; 626 CMSBitMap _modUnionTable; 627 CMSMarkStack _markStack; 628 629 HeapWord* _restart_addr; // In support of marking stack overflow 630 void lower_restart_addr(HeapWord* low); 631 632 // Counters in support of marking stack / work queue overflow handling: 633 // a non-zero value indicates certain types of overflow events during 634 // the current CMS cycle and could lead to stack resizing efforts at 635 // an opportune future time. 636 size_t _ser_pmc_preclean_ovflw; 637 size_t _ser_pmc_remark_ovflw; 638 size_t _par_pmc_remark_ovflw; 639 size_t _ser_kac_preclean_ovflw; 640 size_t _ser_kac_ovflw; 641 size_t _par_kac_ovflw; 642 NOT_PRODUCT(ssize_t _num_par_pushes;) 643 644 // ("Weak") Reference processing support. 645 ReferenceProcessor* _ref_processor; 646 CMSIsAliveClosure _is_alive_closure; 647 // Keep this textually after _markBitMap and _span; c'tor dependency. 648 649 ConcurrentMarkSweepThread* _cmsThread; // The thread doing the work 650 ModUnionClosurePar _modUnionClosurePar; 651 652 // CMS abstract state machine 653 // initial_state: Idling 654 // next_state(Idling) = {Marking} 655 // next_state(Marking) = {Precleaning, Sweeping} 656 // next_state(Precleaning) = {AbortablePreclean, FinalMarking} 657 // next_state(AbortablePreclean) = {FinalMarking} 658 // next_state(FinalMarking) = {Sweeping} 659 // next_state(Sweeping) = {Resizing} 660 // next_state(Resizing) = {Resetting} 661 // next_state(Resetting) = {Idling} 662 // The numeric values below are chosen so that: 663 // . _collectorState <= Idling == post-sweep && pre-mark 664 // . _collectorState in (Idling, Sweeping) == {initial,final}marking || 665 // precleaning || abortablePrecleanb 666 public: 667 enum CollectorState { 668 Resizing = 0, 669 Resetting = 1, 670 Idling = 2, 671 InitialMarking = 3, 672 Marking = 4, 673 Precleaning = 5, 674 AbortablePreclean = 6, 675 FinalMarking = 7, 676 Sweeping = 8 677 }; 678 protected: 679 static CollectorState _collectorState; 680 681 // State related to prologue/epilogue invocation for my generations 682 bool _between_prologue_and_epilogue; 683 684 // Signaling/State related to coordination between fore- and background GC 685 // Note: When the baton has been passed from background GC to foreground GC, 686 // _foregroundGCIsActive is true and _foregroundGCShouldWait is false. 687 static bool _foregroundGCIsActive; // true iff foreground collector is active or 688 // wants to go active 689 static bool _foregroundGCShouldWait; // true iff background GC is active and has not 690 // yet passed the baton to the foreground GC 691 692 // Support for CMSScheduleRemark (abortable preclean) 693 bool _abort_preclean; 694 bool _start_sampling; 695 696 int _numYields; 697 size_t _numDirtyCards; 698 size_t _sweep_count; 699 700 // Occupancy used for bootstrapping stats 701 double _bootstrap_occupancy; 702 703 // Timer 704 elapsedTimer _timer; 705 706 // Timing, allocation and promotion statistics, used for scheduling. 707 CMSStats _stats; 708 709 enum CMS_op_type { 710 CMS_op_checkpointRootsInitial, 711 CMS_op_checkpointRootsFinal 712 }; 713 714 void do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause); 715 bool stop_world_and_do(CMS_op_type op); 716 717 OopTaskQueueSet* task_queues() { return _task_queues; } 718 int* hash_seed(int i) { return &_hash_seed[i]; } 719 YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; } 720 721 // Support for parallelizing Eden rescan in CMS remark phase 722 void sample_eden(); // ... sample Eden space top 723 724 private: 725 // Support for parallelizing young gen rescan in CMS remark phase 726 ParNewGeneration* _young_gen; 727 728 HeapWord* volatile* _top_addr; // ... Top of Eden 729 HeapWord** _end_addr; // ... End of Eden 730 Mutex* _eden_chunk_lock; 731 HeapWord** _eden_chunk_array; // ... Eden partitioning array 732 size_t _eden_chunk_index; // ... top (exclusive) of array 733 size_t _eden_chunk_capacity; // ... max entries in array 734 735 // Support for parallelizing survivor space rescan 736 HeapWord** _survivor_chunk_array; 737 size_t _survivor_chunk_index; 738 size_t _survivor_chunk_capacity; 739 size_t* _cursor; 740 ChunkArray* _survivor_plab_array; 741 742 // Support for marking stack overflow handling 743 bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack); 744 bool par_take_from_overflow_list(size_t num, 745 OopTaskQueue* to_work_q, 746 int no_of_gc_threads); 747 void push_on_overflow_list(oop p); 748 void par_push_on_overflow_list(oop p); 749 // The following is, obviously, not, in general, "MT-stable" 750 bool overflow_list_is_empty() const; 751 752 void preserve_mark_if_necessary(oop p); 753 void par_preserve_mark_if_necessary(oop p); 754 void preserve_mark_work(oop p, markOop m); 755 void restore_preserved_marks_if_any(); 756 NOT_PRODUCT(bool no_preserved_marks() const;) 757 // In support of testing overflow code 758 NOT_PRODUCT(int _overflow_counter;) 759 NOT_PRODUCT(bool simulate_overflow();) // Sequential 760 NOT_PRODUCT(bool par_simulate_overflow();) // MT version 761 762 // CMS work methods 763 void checkpointRootsInitialWork(); // Initial checkpoint work 764 765 // A return value of false indicates failure due to stack overflow 766 bool markFromRootsWork(); // Concurrent marking work 767 768 public: // FIX ME!!! only for testing 769 bool do_marking_st(); // Single-threaded marking 770 bool do_marking_mt(); // Multi-threaded marking 771 772 private: 773 774 // Concurrent precleaning work 775 size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* old_gen, 776 ScanMarkedObjectsAgainCarefullyClosure* cl); 777 size_t preclean_card_table(ConcurrentMarkSweepGeneration* old_gen, 778 ScanMarkedObjectsAgainCarefullyClosure* cl); 779 // Does precleaning work, returning a quantity indicative of 780 // the amount of "useful work" done. 781 size_t preclean_work(bool clean_refs, bool clean_survivors); 782 void preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock); 783 void abortable_preclean(); // Preclean while looking for possible abort 784 void initialize_sequential_subtasks_for_young_gen_rescan(int i); 785 // Helper function for above; merge-sorts the per-thread plab samples 786 void merge_survivor_plab_arrays(ContiguousSpace* surv, int no_of_gc_threads); 787 // Resets (i.e. clears) the per-thread plab sample vectors 788 void reset_survivor_plab_arrays(); 789 790 // Final (second) checkpoint work 791 void checkpointRootsFinalWork(); 792 // Work routine for parallel version of remark 793 void do_remark_parallel(); 794 // Work routine for non-parallel version of remark 795 void do_remark_non_parallel(); 796 // Reference processing work routine (during second checkpoint) 797 void refProcessingWork(); 798 799 // Concurrent sweeping work 800 void sweepWork(ConcurrentMarkSweepGeneration* old_gen); 801 802 // Concurrent resetting of support data structures 803 void reset_concurrent(); 804 // Resetting of support data structures from a STW full GC 805 void reset_stw(); 806 807 // Clear _expansion_cause fields of constituent generations 808 void clear_expansion_cause(); 809 810 // An auxiliary method used to record the ends of 811 // used regions of each generation to limit the extent of sweep 812 void save_sweep_limits(); 813 814 // A work method used by the foreground collector to do 815 // a mark-sweep-compact. 816 void do_compaction_work(bool clear_all_soft_refs); 817 818 // Work methods for reporting concurrent mode interruption or failure 819 bool is_external_interruption(); 820 void report_concurrent_mode_interruption(); 821 822 // If the background GC is active, acquire control from the background 823 // GC and do the collection. 824 void acquire_control_and_collect(bool full, bool clear_all_soft_refs); 825 826 // For synchronizing passing of control from background to foreground 827 // GC. waitForForegroundGC() is called by the background 828 // collector. It if had to wait for a foreground collection, 829 // it returns true and the background collection should assume 830 // that the collection was finished by the foreground 831 // collector. 832 bool waitForForegroundGC(); 833 834 size_t block_size_using_printezis_bits(HeapWord* addr) const; 835 size_t block_size_if_printezis_bits(HeapWord* addr) const; 836 HeapWord* next_card_start_after_block(HeapWord* addr) const; 837 838 void setup_cms_unloading_and_verification_state(); 839 public: 840 CMSCollector(ConcurrentMarkSweepGeneration* cmsGen, 841 CardTableRS* ct, 842 ConcurrentMarkSweepPolicy* cp); 843 ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; } 844 845 ReferenceProcessor* ref_processor() { return _ref_processor; } 846 void ref_processor_init(); 847 848 Mutex* bitMapLock() const { return _markBitMap.lock(); } 849 static CollectorState abstract_state() { return _collectorState; } 850 851 bool should_abort_preclean() const; // Whether preclean should be aborted. 852 size_t get_eden_used() const; 853 size_t get_eden_capacity() const; 854 855 ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; } 856 857 // Locking checks 858 NOT_PRODUCT(static bool have_cms_token();) 859 860 bool shouldConcurrentCollect(); 861 862 void collect(bool full, 863 bool clear_all_soft_refs, 864 size_t size, 865 bool tlab); 866 void collect_in_background(GCCause::Cause cause); 867 868 // In support of ExplicitGCInvokesConcurrent 869 static void request_full_gc(unsigned int full_gc_count, GCCause::Cause cause); 870 // Should we unload classes in a particular concurrent cycle? 871 bool should_unload_classes() const { 872 return _should_unload_classes; 873 } 874 void update_should_unload_classes(); 875 876 void direct_allocated(HeapWord* start, size_t size); 877 878 // Object is dead if not marked and current phase is sweeping. 879 bool is_dead_obj(oop obj) const; 880 881 // After a promotion (of "start"), do any necessary marking. 882 // If "par", then it's being done by a parallel GC thread. 883 // The last two args indicate if we need precise marking 884 // and if so the size of the object so it can be dirtied 885 // in its entirety. 886 void promoted(bool par, HeapWord* start, 887 bool is_obj_array, size_t obj_size); 888 889 void getFreelistLocks() const; 890 void releaseFreelistLocks() const; 891 bool haveFreelistLocks() const; 892 893 // Adjust size of underlying generation 894 void compute_new_size(); 895 896 // GC prologue and epilogue 897 void gc_prologue(bool full); 898 void gc_epilogue(bool full); 899 900 jlong time_of_last_gc(jlong now) { 901 if (_collectorState <= Idling) { 902 // gc not in progress 903 return _time_of_last_gc; 904 } else { 905 // collection in progress 906 return now; 907 } 908 } 909 910 // Support for parallel remark of survivor space 911 void* get_data_recorder(int thr_num); 912 void sample_eden_chunk(); 913 914 CMSBitMap* markBitMap() { return &_markBitMap; } 915 void directAllocated(HeapWord* start, size_t size); 916 917 // Main CMS steps and related support 918 void checkpointRootsInitial(); 919 bool markFromRoots(); // a return value of false indicates failure 920 // due to stack overflow 921 void preclean(); 922 void checkpointRootsFinal(); 923 void sweep(); 924 925 // Check that the currently executing thread is the expected 926 // one (foreground collector or background collector). 927 static void check_correct_thread_executing() PRODUCT_RETURN; 928 929 NOT_PRODUCT(bool is_cms_reachable(HeapWord* addr);) 930 931 // Performance Counter Support 932 CollectorCounters* counters() { return _gc_counters; } 933 CollectorCounters* cgc_counters() { return _cgc_counters; } 934 935 // Timer stuff 936 void startTimer() { assert(!_timer.is_active(), "Error"); _timer.start(); } 937 void stopTimer() { assert( _timer.is_active(), "Error"); _timer.stop(); } 938 void resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset(); } 939 jlong timerTicks() { assert(!_timer.is_active(), "Error"); return _timer.ticks(); } 940 941 int yields() { return _numYields; } 942 void resetYields() { _numYields = 0; } 943 void incrementYields() { _numYields++; } 944 void resetNumDirtyCards() { _numDirtyCards = 0; } 945 void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; } 946 size_t numDirtyCards() { return _numDirtyCards; } 947 948 static bool foregroundGCShouldWait() { return _foregroundGCShouldWait; } 949 static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; } 950 static bool foregroundGCIsActive() { return _foregroundGCIsActive; } 951 static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; } 952 size_t sweep_count() const { return _sweep_count; } 953 void increment_sweep_count() { _sweep_count++; } 954 955 // Timers/stats for gc scheduling and incremental mode pacing. 956 CMSStats& stats() { return _stats; } 957 958 // Adaptive size policy 959 AdaptiveSizePolicy* size_policy(); 960 961 static void print_on_error(outputStream* st); 962 963 // Debugging 964 void verify(); 965 bool verify_after_remark(); 966 void verify_ok_to_terminate() const PRODUCT_RETURN; 967 void verify_work_stacks_empty() const PRODUCT_RETURN; 968 void verify_overflow_empty() const PRODUCT_RETURN; 969 970 // Convenience methods in support of debugging 971 static const size_t skip_header_HeapWords() PRODUCT_RETURN0; 972 HeapWord* block_start(const void* p) const PRODUCT_RETURN0; 973 974 // Accessors 975 CMSMarkStack* verification_mark_stack() { return &_markStack; } 976 CMSBitMap* verification_mark_bm() { return &_verification_mark_bm; } 977 978 // Initialization errors 979 bool completed_initialization() { return _completed_initialization; } 980 981 void print_eden_and_survivor_chunk_arrays(); 982 983 ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; } 984 }; 985 986 class CMSExpansionCause : public AllStatic { 987 public: 988 enum Cause { 989 _no_expansion, 990 _satisfy_free_ratio, 991 _satisfy_promotion, 992 _satisfy_allocation, 993 _allocate_par_lab, 994 _allocate_par_spooling_space, 995 _adaptive_size_policy 996 }; 997 // Return a string describing the cause of the expansion. 998 static const char* to_string(CMSExpansionCause::Cause cause); 999 }; 1000 1001 class ConcurrentMarkSweepGeneration: public CardGeneration { 1002 friend class VMStructs; 1003 friend class ConcurrentMarkSweepThread; 1004 friend class ConcurrentMarkSweep; 1005 friend class CMSCollector; 1006 protected: 1007 static CMSCollector* _collector; // the collector that collects us 1008 CompactibleFreeListSpace* _cmsSpace; // underlying space (only one for now) 1009 1010 // Performance Counters 1011 GenerationCounters* _gen_counters; 1012 GSpaceCounters* _space_counters; 1013 1014 // Words directly allocated, used by CMSStats. 1015 size_t _direct_allocated_words; 1016 1017 // Non-product stat counters 1018 NOT_PRODUCT( 1019 size_t _numObjectsPromoted; 1020 size_t _numWordsPromoted; 1021 size_t _numObjectsAllocated; 1022 size_t _numWordsAllocated; 1023 ) 1024 1025 // Used for sizing decisions 1026 bool _incremental_collection_failed; 1027 bool incremental_collection_failed() { 1028 return _incremental_collection_failed; 1029 } 1030 void set_incremental_collection_failed() { 1031 _incremental_collection_failed = true; 1032 } 1033 void clear_incremental_collection_failed() { 1034 _incremental_collection_failed = false; 1035 } 1036 1037 // accessors 1038 void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;} 1039 CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; } 1040 1041 // Accessing spaces 1042 CompactibleSpace* space() const { return (CompactibleSpace*)_cmsSpace; } 1043 1044 private: 1045 // For parallel young-gen GC support. 1046 CMSParGCThreadState** _par_gc_thread_states; 1047 1048 // Reason generation was expanded 1049 CMSExpansionCause::Cause _expansion_cause; 1050 1051 // In support of MinChunkSize being larger than min object size 1052 const double _dilatation_factor; 1053 1054 // True if a compacting collection was done. 1055 bool _did_compact; 1056 bool did_compact() { return _did_compact; } 1057 1058 // Fraction of current occupancy at which to start a CMS collection which 1059 // will collect this generation (at least). 1060 double _initiating_occupancy; 1061 1062 protected: 1063 // Shrink generation by specified size (returns false if unable to shrink) 1064 void shrink_free_list_by(size_t bytes); 1065 1066 // Update statistics for GC 1067 virtual void update_gc_stats(Generation* current_generation, bool full); 1068 1069 // Maximum available space in the generation (including uncommitted) 1070 // space. 1071 size_t max_available() const; 1072 1073 // getter and initializer for _initiating_occupancy field. 1074 double initiating_occupancy() const { return _initiating_occupancy; } 1075 void init_initiating_occupancy(intx io, uintx tr); 1076 1077 void expand_for_gc_cause(size_t bytes, size_t expand_bytes, CMSExpansionCause::Cause cause); 1078 1079 void assert_correct_size_change_locking(); 1080 1081 public: 1082 ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size, CardTableRS* ct); 1083 1084 // Accessors 1085 CMSCollector* collector() const { return _collector; } 1086 static void set_collector(CMSCollector* collector) { 1087 assert(_collector == NULL, "already set"); 1088 _collector = collector; 1089 } 1090 CompactibleFreeListSpace* cmsSpace() const { return _cmsSpace; } 1091 1092 Mutex* freelistLock() const; 1093 1094 virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; } 1095 1096 void set_did_compact(bool v) { _did_compact = v; } 1097 1098 bool refs_discovery_is_atomic() const { return false; } 1099 bool refs_discovery_is_mt() const { 1100 // Note: CMS does MT-discovery during the parallel-remark 1101 // phases. Use ReferenceProcessorMTMutator to make refs 1102 // discovery MT-safe during such phases or other parallel 1103 // discovery phases in the future. This may all go away 1104 // if/when we decide that refs discovery is sufficiently 1105 // rare that the cost of the CAS's involved is in the 1106 // noise. That's a measurement that should be done, and 1107 // the code simplified if that turns out to be the case. 1108 return ConcGCThreads > 1; 1109 } 1110 1111 // Override 1112 virtual void ref_processor_init(); 1113 1114 void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; } 1115 1116 // Space enquiries 1117 double occupancy() const { return ((double)used())/((double)capacity()); } 1118 size_t contiguous_available() const; 1119 size_t unsafe_max_alloc_nogc() const; 1120 1121 // over-rides 1122 MemRegion used_region_at_save_marks() const; 1123 1124 // Adjust quantities in the generation affected by 1125 // the compaction. 1126 void reset_after_compaction(); 1127 1128 // Allocation support 1129 HeapWord* allocate(size_t size, bool tlab); 1130 HeapWord* have_lock_and_allocate(size_t size, bool tlab); 1131 oop promote(oop obj, size_t obj_size); 1132 HeapWord* par_allocate(size_t size, bool tlab) { 1133 return allocate(size, tlab); 1134 } 1135 1136 1137 // Used by CMSStats to track direct allocation. The value is sampled and 1138 // reset after each young gen collection. 1139 size_t direct_allocated_words() const { return _direct_allocated_words; } 1140 void reset_direct_allocated_words() { _direct_allocated_words = 0; } 1141 1142 // Overrides for parallel promotion. 1143 virtual oop par_promote(int thread_num, 1144 oop obj, markOop m, size_t word_sz); 1145 virtual void par_promote_alloc_done(int thread_num); 1146 virtual void par_oop_since_save_marks_iterate_done(int thread_num); 1147 1148 virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const; 1149 1150 // Inform this (old) generation that a promotion failure was 1151 // encountered during a collection of the young generation. 1152 virtual void promotion_failure_occurred(); 1153 1154 bool should_collect(bool full, size_t size, bool tlab); 1155 virtual bool should_concurrent_collect() const; 1156 virtual bool is_too_full() const; 1157 void collect(bool full, 1158 bool clear_all_soft_refs, 1159 size_t size, 1160 bool tlab); 1161 1162 HeapWord* expand_and_allocate(size_t word_size, 1163 bool tlab, 1164 bool parallel = false); 1165 1166 // GC prologue and epilogue 1167 void gc_prologue(bool full); 1168 void gc_prologue_work(bool full, bool registerClosure, 1169 ModUnionClosure* modUnionClosure); 1170 void gc_epilogue(bool full); 1171 void gc_epilogue_work(bool full); 1172 1173 // Time since last GC of this generation 1174 jlong time_of_last_gc(jlong now) { 1175 return collector()->time_of_last_gc(now); 1176 } 1177 void update_time_of_last_gc(jlong now) { 1178 collector()-> update_time_of_last_gc(now); 1179 } 1180 1181 // Allocation failure 1182 void shrink(size_t bytes); 1183 HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz); 1184 bool expand_and_ensure_spooling_space(PromotionInfo* promo); 1185 1186 // Iteration support and related enquiries 1187 void save_marks(); 1188 bool no_allocs_since_save_marks(); 1189 1190 // Iteration support specific to CMS generations 1191 void save_sweep_limit(); 1192 1193 // More iteration support 1194 virtual void oop_iterate(ExtendedOopClosure* cl); 1195 virtual void safe_object_iterate(ObjectClosure* cl); 1196 virtual void object_iterate(ObjectClosure* cl); 1197 1198 // Need to declare the full complement of closures, whether we'll 1199 // override them or not, or get message from the compiler: 1200 // oop_since_save_marks_iterate_nv hides virtual function... 1201 #define CMS_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \ 1202 void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl); 1203 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DECL) 1204 1205 // Smart allocation XXX -- move to CFLSpace? 1206 void setNearLargestChunk(); 1207 bool isNearLargestChunk(HeapWord* addr); 1208 1209 // Get the chunk at the end of the space. Delegates to 1210 // the space. 1211 FreeChunk* find_chunk_at_end(); 1212 1213 void post_compact(); 1214 1215 // Debugging 1216 void prepare_for_verify(); 1217 void verify(); 1218 void print_statistics() PRODUCT_RETURN; 1219 1220 // Performance Counters support 1221 virtual void update_counters(); 1222 virtual void update_counters(size_t used); 1223 void initialize_performance_counters(); 1224 CollectorCounters* counters() { return collector()->counters(); } 1225 1226 // Support for parallel remark of survivor space 1227 void* get_data_recorder(int thr_num) { 1228 //Delegate to collector 1229 return collector()->get_data_recorder(thr_num); 1230 } 1231 void sample_eden_chunk() { 1232 //Delegate to collector 1233 return collector()->sample_eden_chunk(); 1234 } 1235 1236 // Printing 1237 const char* name() const; 1238 virtual const char* short_name() const { return "CMS"; } 1239 void print() const; 1240 1241 // Resize the generation after a compacting GC. The 1242 // generation can be treated as a contiguous space 1243 // after the compaction. 1244 virtual void compute_new_size(); 1245 // Resize the generation after a non-compacting 1246 // collection. 1247 void compute_new_size_free_list(); 1248 }; 1249 1250 // 1251 // Closures of various sorts used by CMS to accomplish its work 1252 // 1253 1254 // This closure is used to do concurrent marking from the roots 1255 // following the first checkpoint. 1256 class MarkFromRootsClosure: public BitMapClosure { 1257 CMSCollector* _collector; 1258 MemRegion _span; 1259 CMSBitMap* _bitMap; 1260 CMSBitMap* _mut; 1261 CMSMarkStack* _markStack; 1262 bool _yield; 1263 int _skipBits; 1264 HeapWord* _finger; 1265 HeapWord* _threshold; 1266 DEBUG_ONLY(bool _verifying;) 1267 1268 public: 1269 MarkFromRootsClosure(CMSCollector* collector, MemRegion span, 1270 CMSBitMap* bitMap, 1271 CMSMarkStack* markStack, 1272 bool should_yield, bool verifying = false); 1273 bool do_bit(size_t offset); 1274 void reset(HeapWord* addr); 1275 inline void do_yield_check(); 1276 1277 private: 1278 void scanOopsInOop(HeapWord* ptr); 1279 void do_yield_work(); 1280 }; 1281 1282 // This closure is used to do concurrent multi-threaded 1283 // marking from the roots following the first checkpoint. 1284 // XXX This should really be a subclass of The serial version 1285 // above, but i have not had the time to refactor things cleanly. 1286 class ParMarkFromRootsClosure: public BitMapClosure { 1287 CMSCollector* _collector; 1288 MemRegion _whole_span; 1289 MemRegion _span; 1290 CMSBitMap* _bit_map; 1291 CMSBitMap* _mut; 1292 OopTaskQueue* _work_queue; 1293 CMSMarkStack* _overflow_stack; 1294 int _skip_bits; 1295 HeapWord* _finger; 1296 HeapWord* _threshold; 1297 CMSConcMarkingTask* _task; 1298 public: 1299 ParMarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector, 1300 MemRegion span, 1301 CMSBitMap* bit_map, 1302 OopTaskQueue* work_queue, 1303 CMSMarkStack* overflow_stack); 1304 bool do_bit(size_t offset); 1305 inline void do_yield_check(); 1306 1307 private: 1308 void scan_oops_in_oop(HeapWord* ptr); 1309 void do_yield_work(); 1310 bool get_work_from_overflow_stack(); 1311 }; 1312 1313 // The following closures are used to do certain kinds of verification of 1314 // CMS marking. 1315 class PushAndMarkVerifyClosure: public MetadataAwareOopClosure { 1316 CMSCollector* _collector; 1317 MemRegion _span; 1318 CMSBitMap* _verification_bm; 1319 CMSBitMap* _cms_bm; 1320 CMSMarkStack* _mark_stack; 1321 protected: 1322 void do_oop(oop p); 1323 template <class T> inline void do_oop_work(T *p) { 1324 oop obj = oopDesc::load_decode_heap_oop(p); 1325 do_oop(obj); 1326 } 1327 public: 1328 PushAndMarkVerifyClosure(CMSCollector* cms_collector, 1329 MemRegion span, 1330 CMSBitMap* verification_bm, 1331 CMSBitMap* cms_bm, 1332 CMSMarkStack* mark_stack); 1333 void do_oop(oop* p); 1334 void do_oop(narrowOop* p); 1335 1336 // Deal with a stack overflow condition 1337 void handle_stack_overflow(HeapWord* lost); 1338 }; 1339 1340 class MarkFromRootsVerifyClosure: public BitMapClosure { 1341 CMSCollector* _collector; 1342 MemRegion _span; 1343 CMSBitMap* _verification_bm; 1344 CMSBitMap* _cms_bm; 1345 CMSMarkStack* _mark_stack; 1346 HeapWord* _finger; 1347 PushAndMarkVerifyClosure _pam_verify_closure; 1348 public: 1349 MarkFromRootsVerifyClosure(CMSCollector* collector, MemRegion span, 1350 CMSBitMap* verification_bm, 1351 CMSBitMap* cms_bm, 1352 CMSMarkStack* mark_stack); 1353 bool do_bit(size_t offset); 1354 void reset(HeapWord* addr); 1355 }; 1356 1357 1358 // This closure is used to check that a certain set of bits is 1359 // "empty" (i.e. the bit vector doesn't have any 1-bits). 1360 class FalseBitMapClosure: public BitMapClosure { 1361 public: 1362 bool do_bit(size_t offset) { 1363 guarantee(false, "Should not have a 1 bit"); 1364 return true; 1365 } 1366 }; 1367 1368 // A version of ObjectClosure with "memory" (see _previous_address below) 1369 class UpwardsObjectClosure: public BoolObjectClosure { 1370 HeapWord* _previous_address; 1371 public: 1372 UpwardsObjectClosure() : _previous_address(NULL) { } 1373 void set_previous(HeapWord* addr) { _previous_address = addr; } 1374 HeapWord* previous() { return _previous_address; } 1375 // A return value of "true" can be used by the caller to decide 1376 // if this object's end should *NOT* be recorded in 1377 // _previous_address above. 1378 virtual bool do_object_bm(oop obj, MemRegion mr) = 0; 1379 }; 1380 1381 // This closure is used during the second checkpointing phase 1382 // to rescan the marked objects on the dirty cards in the mod 1383 // union table and the card table proper. It's invoked via 1384 // MarkFromDirtyCardsClosure below. It uses either 1385 // [Par_]MarkRefsIntoAndScanClosure (Par_ in the parallel case) 1386 // declared in genOopClosures.hpp to accomplish some of its work. 1387 // In the parallel case the bitMap is shared, so access to 1388 // it needs to be suitably synchronized for updates by embedded 1389 // closures that update it; however, this closure itself only 1390 // reads the bit_map and because it is idempotent, is immune to 1391 // reading stale values. 1392 class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure { 1393 #ifdef ASSERT 1394 CMSCollector* _collector; 1395 MemRegion _span; 1396 union { 1397 CMSMarkStack* _mark_stack; 1398 OopTaskQueue* _work_queue; 1399 }; 1400 #endif // ASSERT 1401 bool _parallel; 1402 CMSBitMap* _bit_map; 1403 union { 1404 MarkRefsIntoAndScanClosure* _scan_closure; 1405 ParMarkRefsIntoAndScanClosure* _par_scan_closure; 1406 }; 1407 1408 public: 1409 ScanMarkedObjectsAgainClosure(CMSCollector* collector, 1410 MemRegion span, 1411 ReferenceProcessor* rp, 1412 CMSBitMap* bit_map, 1413 CMSMarkStack* mark_stack, 1414 MarkRefsIntoAndScanClosure* cl): 1415 #ifdef ASSERT 1416 _collector(collector), 1417 _span(span), 1418 _mark_stack(mark_stack), 1419 #endif // ASSERT 1420 _parallel(false), 1421 _bit_map(bit_map), 1422 _scan_closure(cl) { } 1423 1424 ScanMarkedObjectsAgainClosure(CMSCollector* collector, 1425 MemRegion span, 1426 ReferenceProcessor* rp, 1427 CMSBitMap* bit_map, 1428 OopTaskQueue* work_queue, 1429 ParMarkRefsIntoAndScanClosure* cl): 1430 #ifdef ASSERT 1431 _collector(collector), 1432 _span(span), 1433 _work_queue(work_queue), 1434 #endif // ASSERT 1435 _parallel(true), 1436 _bit_map(bit_map), 1437 _par_scan_closure(cl) { } 1438 1439 bool do_object_b(oop obj) { 1440 guarantee(false, "Call do_object_b(oop, MemRegion) form instead"); 1441 return false; 1442 } 1443 bool do_object_bm(oop p, MemRegion mr); 1444 }; 1445 1446 // This closure is used during the second checkpointing phase 1447 // to rescan the marked objects on the dirty cards in the mod 1448 // union table and the card table proper. It invokes 1449 // ScanMarkedObjectsAgainClosure above to accomplish much of its work. 1450 // In the parallel case, the bit map is shared and requires 1451 // synchronized access. 1452 class MarkFromDirtyCardsClosure: public MemRegionClosure { 1453 CompactibleFreeListSpace* _space; 1454 ScanMarkedObjectsAgainClosure _scan_cl; 1455 size_t _num_dirty_cards; 1456 1457 public: 1458 MarkFromDirtyCardsClosure(CMSCollector* collector, 1459 MemRegion span, 1460 CompactibleFreeListSpace* space, 1461 CMSBitMap* bit_map, 1462 CMSMarkStack* mark_stack, 1463 MarkRefsIntoAndScanClosure* cl): 1464 _space(space), 1465 _num_dirty_cards(0), 1466 _scan_cl(collector, span, collector->ref_processor(), bit_map, 1467 mark_stack, cl) { } 1468 1469 MarkFromDirtyCardsClosure(CMSCollector* collector, 1470 MemRegion span, 1471 CompactibleFreeListSpace* space, 1472 CMSBitMap* bit_map, 1473 OopTaskQueue* work_queue, 1474 ParMarkRefsIntoAndScanClosure* cl): 1475 _space(space), 1476 _num_dirty_cards(0), 1477 _scan_cl(collector, span, collector->ref_processor(), bit_map, 1478 work_queue, cl) { } 1479 1480 void do_MemRegion(MemRegion mr); 1481 void set_space(CompactibleFreeListSpace* space) { _space = space; } 1482 size_t num_dirty_cards() { return _num_dirty_cards; } 1483 }; 1484 1485 // This closure is used in the non-product build to check 1486 // that there are no MemRegions with a certain property. 1487 class FalseMemRegionClosure: public MemRegionClosure { 1488 void do_MemRegion(MemRegion mr) { 1489 guarantee(!mr.is_empty(), "Shouldn't be empty"); 1490 guarantee(false, "Should never be here"); 1491 } 1492 }; 1493 1494 // This closure is used during the precleaning phase 1495 // to "carefully" rescan marked objects on dirty cards. 1496 // It uses MarkRefsIntoAndScanClosure declared in genOopClosures.hpp 1497 // to accomplish some of its work. 1498 class ScanMarkedObjectsAgainCarefullyClosure: public ObjectClosureCareful { 1499 CMSCollector* _collector; 1500 MemRegion _span; 1501 bool _yield; 1502 Mutex* _freelistLock; 1503 CMSBitMap* _bitMap; 1504 CMSMarkStack* _markStack; 1505 MarkRefsIntoAndScanClosure* _scanningClosure; 1506 DEBUG_ONLY(HeapWord* _last_scanned_object;) 1507 1508 public: 1509 ScanMarkedObjectsAgainCarefullyClosure(CMSCollector* collector, 1510 MemRegion span, 1511 CMSBitMap* bitMap, 1512 CMSMarkStack* markStack, 1513 MarkRefsIntoAndScanClosure* cl, 1514 bool should_yield): 1515 _collector(collector), 1516 _span(span), 1517 _yield(should_yield), 1518 _bitMap(bitMap), 1519 _markStack(markStack), 1520 _scanningClosure(cl) 1521 DEBUG_ONLY(COMMA _last_scanned_object(NULL)) 1522 { } 1523 1524 void do_object(oop p) { 1525 guarantee(false, "call do_object_careful instead"); 1526 } 1527 1528 size_t do_object_careful(oop p) { 1529 guarantee(false, "Unexpected caller"); 1530 return 0; 1531 } 1532 1533 size_t do_object_careful_m(oop p, MemRegion mr); 1534 1535 void setFreelistLock(Mutex* m) { 1536 _freelistLock = m; 1537 _scanningClosure->set_freelistLock(m); 1538 } 1539 1540 private: 1541 inline bool do_yield_check(); 1542 1543 void do_yield_work(); 1544 }; 1545 1546 class SurvivorSpacePrecleanClosure: public ObjectClosureCareful { 1547 CMSCollector* _collector; 1548 MemRegion _span; 1549 bool _yield; 1550 CMSBitMap* _bit_map; 1551 CMSMarkStack* _mark_stack; 1552 PushAndMarkClosure* _scanning_closure; 1553 unsigned int _before_count; 1554 1555 public: 1556 SurvivorSpacePrecleanClosure(CMSCollector* collector, 1557 MemRegion span, 1558 CMSBitMap* bit_map, 1559 CMSMarkStack* mark_stack, 1560 PushAndMarkClosure* cl, 1561 unsigned int before_count, 1562 bool should_yield): 1563 _collector(collector), 1564 _span(span), 1565 _yield(should_yield), 1566 _bit_map(bit_map), 1567 _mark_stack(mark_stack), 1568 _scanning_closure(cl), 1569 _before_count(before_count) 1570 { } 1571 1572 void do_object(oop p) { 1573 guarantee(false, "call do_object_careful instead"); 1574 } 1575 1576 size_t do_object_careful(oop p); 1577 1578 size_t do_object_careful_m(oop p, MemRegion mr) { 1579 guarantee(false, "Unexpected caller"); 1580 return 0; 1581 } 1582 1583 private: 1584 inline void do_yield_check(); 1585 void do_yield_work(); 1586 }; 1587 1588 // This closure is used to accomplish the sweeping work 1589 // after the second checkpoint but before the concurrent reset 1590 // phase. 1591 // 1592 // Terminology 1593 // left hand chunk (LHC) - block of one or more chunks currently being 1594 // coalesced. The LHC is available for coalescing with a new chunk. 1595 // right hand chunk (RHC) - block that is currently being swept that is 1596 // free or garbage that can be coalesced with the LHC. 1597 // _inFreeRange is true if there is currently a LHC 1598 // _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk. 1599 // _freeRangeInFreeLists is true if the LHC is in the free lists. 1600 // _freeFinger is the address of the current LHC 1601 class SweepClosure: public BlkClosureCareful { 1602 CMSCollector* _collector; // collector doing the work 1603 ConcurrentMarkSweepGeneration* _g; // Generation being swept 1604 CompactibleFreeListSpace* _sp; // Space being swept 1605 HeapWord* _limit;// the address at or above which the sweep should stop 1606 // because we do not expect newly garbage blocks 1607 // eligible for sweeping past that address. 1608 Mutex* _freelistLock; // Free list lock (in space) 1609 CMSBitMap* _bitMap; // Marking bit map (in 1610 // generation) 1611 bool _inFreeRange; // Indicates if we are in the 1612 // midst of a free run 1613 bool _freeRangeInFreeLists; 1614 // Often, we have just found 1615 // a free chunk and started 1616 // a new free range; we do not 1617 // eagerly remove this chunk from 1618 // the free lists unless there is 1619 // a possibility of coalescing. 1620 // When true, this flag indicates 1621 // that the _freeFinger below 1622 // points to a potentially free chunk 1623 // that may still be in the free lists 1624 bool _lastFreeRangeCoalesced; 1625 // free range contains chunks 1626 // coalesced 1627 bool _yield; 1628 // Whether sweeping should be 1629 // done with yields. For instance 1630 // when done by the foreground 1631 // collector we shouldn't yield. 1632 HeapWord* _freeFinger; // When _inFreeRange is set, the 1633 // pointer to the "left hand 1634 // chunk" 1635 size_t _freeRangeSize; 1636 // When _inFreeRange is set, this 1637 // indicates the accumulated size 1638 // of the "left hand chunk" 1639 NOT_PRODUCT( 1640 size_t _numObjectsFreed; 1641 size_t _numWordsFreed; 1642 size_t _numObjectsLive; 1643 size_t _numWordsLive; 1644 size_t _numObjectsAlreadyFree; 1645 size_t _numWordsAlreadyFree; 1646 FreeChunk* _last_fc; 1647 ) 1648 private: 1649 // Code that is common to a free chunk or garbage when 1650 // encountered during sweeping. 1651 void do_post_free_or_garbage_chunk(FreeChunk *fc, size_t chunkSize); 1652 // Process a free chunk during sweeping. 1653 void do_already_free_chunk(FreeChunk *fc); 1654 // Work method called when processing an already free or a 1655 // freshly garbage chunk to do a lookahead and possibly a 1656 // preemptive flush if crossing over _limit. 1657 void lookahead_and_flush(FreeChunk* fc, size_t chunkSize); 1658 // Process a garbage chunk during sweeping. 1659 size_t do_garbage_chunk(FreeChunk *fc); 1660 // Process a live chunk during sweeping. 1661 size_t do_live_chunk(FreeChunk* fc); 1662 1663 // Accessors. 1664 HeapWord* freeFinger() const { return _freeFinger; } 1665 void set_freeFinger(HeapWord* v) { _freeFinger = v; } 1666 bool inFreeRange() const { return _inFreeRange; } 1667 void set_inFreeRange(bool v) { _inFreeRange = v; } 1668 bool lastFreeRangeCoalesced() const { return _lastFreeRangeCoalesced; } 1669 void set_lastFreeRangeCoalesced(bool v) { _lastFreeRangeCoalesced = v; } 1670 bool freeRangeInFreeLists() const { return _freeRangeInFreeLists; } 1671 void set_freeRangeInFreeLists(bool v) { _freeRangeInFreeLists = v; } 1672 1673 // Initialize a free range. 1674 void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists); 1675 // Return this chunk to the free lists. 1676 void flush_cur_free_chunk(HeapWord* chunk, size_t size); 1677 1678 // Check if we should yield and do so when necessary. 1679 inline void do_yield_check(HeapWord* addr); 1680 1681 // Yield 1682 void do_yield_work(HeapWord* addr); 1683 1684 // Debugging/Printing 1685 void print_free_block_coalesced(FreeChunk* fc) const; 1686 1687 public: 1688 SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g, 1689 CMSBitMap* bitMap, bool should_yield); 1690 ~SweepClosure() PRODUCT_RETURN; 1691 1692 size_t do_blk_careful(HeapWord* addr); 1693 void print() const { print_on(tty); } 1694 void print_on(outputStream *st) const; 1695 }; 1696 1697 // Closures related to weak references processing 1698 1699 // During CMS' weak reference processing, this is a 1700 // work-routine/closure used to complete transitive 1701 // marking of objects as live after a certain point 1702 // in which an initial set has been completely accumulated. 1703 // This closure is currently used both during the final 1704 // remark stop-world phase, as well as during the concurrent 1705 // precleaning of the discovered reference lists. 1706 class CMSDrainMarkingStackClosure: public VoidClosure { 1707 CMSCollector* _collector; 1708 MemRegion _span; 1709 CMSMarkStack* _mark_stack; 1710 CMSBitMap* _bit_map; 1711 CMSKeepAliveClosure* _keep_alive; 1712 bool _concurrent_precleaning; 1713 public: 1714 CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span, 1715 CMSBitMap* bit_map, CMSMarkStack* mark_stack, 1716 CMSKeepAliveClosure* keep_alive, 1717 bool cpc): 1718 _collector(collector), 1719 _span(span), 1720 _bit_map(bit_map), 1721 _mark_stack(mark_stack), 1722 _keep_alive(keep_alive), 1723 _concurrent_precleaning(cpc) { 1724 assert(_concurrent_precleaning == _keep_alive->concurrent_precleaning(), 1725 "Mismatch"); 1726 } 1727 1728 void do_void(); 1729 }; 1730 1731 // A parallel version of CMSDrainMarkingStackClosure above. 1732 class CMSParDrainMarkingStackClosure: public VoidClosure { 1733 CMSCollector* _collector; 1734 MemRegion _span; 1735 OopTaskQueue* _work_queue; 1736 CMSBitMap* _bit_map; 1737 CMSInnerParMarkAndPushClosure _mark_and_push; 1738 1739 public: 1740 CMSParDrainMarkingStackClosure(CMSCollector* collector, 1741 MemRegion span, CMSBitMap* bit_map, 1742 OopTaskQueue* work_queue): 1743 _collector(collector), 1744 _span(span), 1745 _bit_map(bit_map), 1746 _work_queue(work_queue), 1747 _mark_and_push(collector, span, bit_map, work_queue) { } 1748 1749 public: 1750 void trim_queue(uint max); 1751 void do_void(); 1752 }; 1753 1754 // Allow yielding or short-circuiting of reference list 1755 // precleaning work. 1756 class CMSPrecleanRefsYieldClosure: public YieldClosure { 1757 CMSCollector* _collector; 1758 void do_yield_work(); 1759 public: 1760 CMSPrecleanRefsYieldClosure(CMSCollector* collector): 1761 _collector(collector) {} 1762 virtual bool should_return(); 1763 }; 1764 1765 1766 // Convenience class that locks free list locks for given CMS collector 1767 class FreelistLocker: public StackObj { 1768 private: 1769 CMSCollector* _collector; 1770 public: 1771 FreelistLocker(CMSCollector* collector): 1772 _collector(collector) { 1773 _collector->getFreelistLocks(); 1774 } 1775 1776 ~FreelistLocker() { 1777 _collector->releaseFreelistLocks(); 1778 } 1779 }; 1780 1781 // Mark all dead objects in a given space. 1782 class MarkDeadObjectsClosure: public BlkClosure { 1783 const CMSCollector* _collector; 1784 const CompactibleFreeListSpace* _sp; 1785 CMSBitMap* _live_bit_map; 1786 CMSBitMap* _dead_bit_map; 1787 public: 1788 MarkDeadObjectsClosure(const CMSCollector* collector, 1789 const CompactibleFreeListSpace* sp, 1790 CMSBitMap *live_bit_map, 1791 CMSBitMap *dead_bit_map) : 1792 _collector(collector), 1793 _sp(sp), 1794 _live_bit_map(live_bit_map), 1795 _dead_bit_map(dead_bit_map) {} 1796 size_t do_blk(HeapWord* addr); 1797 }; 1798 1799 class TraceCMSMemoryManagerStats : public TraceMemoryManagerStats { 1800 1801 public: 1802 TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause); 1803 }; 1804 1805 1806 #endif // SHARE_VM_GC_CMS_CONCURRENTMARKSWEEPGENERATION_HPP