/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP
#define SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP

#include "classfile/javaClasses.hpp"
#include "gc/g1/g1RegionToSpaceMapper.hpp"
#include "gc/g1/heapRegionSet.hpp"
#include "gc/shared/taskqueue.hpp"

class G1CollectedHeap;
class G1CMBitMap;
class G1CMTask;
class G1ConcurrentMark;
typedef GenericTaskQueue<oop, mtGC> G1CMTaskQueue;
typedef GenericTaskQueueSet<G1CMTaskQueue, mtGC> G1CMTaskQueueSet;

// Closure used by CM during concurrent reference discovery
// and reference processing (during remarking) to determine
// if a particular object is alive. It is primarily used
// to determine if referents of discovered reference objects
// are alive. An instance is also embedded into the
// reference processor as the _is_alive_non_header field
class G1CMIsAliveClosure: public BoolObjectClosure {
  G1CollectedHeap* _g1;
 public:
  G1CMIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) { }

  bool do_object_b(oop obj);
};

// A generic CM bit map. This is essentially a wrapper around the BitMap
// class, with one bit per (1<<_shifter) HeapWords.

class G1CMBitMapRO VALUE_OBJ_CLASS_SPEC {
 protected:
  HeapWord*  _bmStartWord; // base address of range covered by map
  size_t     _bmWordSize;  // map size (in #HeapWords covered)
  const int  _shifter;     // map to char or bit
  BitMap     _bm;          // the bit map itself

 public:
  // constructor
  G1CMBitMapRO(int shifter);

  // inquiries
  HeapWord* startWord() const { return _bmStartWord; }
  // the following is one past the last word in the space
  HeapWord* endWord() const { return _bmStartWord + _bmWordSize; }

  // read marks

  bool isMarked(HeapWord* addr) const {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    return _bm.at(heapWordToOffset(addr));
  }

  // iteration
  inline bool iterate(BitMapClosure* cl, MemRegion mr);

  // Return the address corresponding to the next marked bit at or after
  // "addr", and before "limit", if "limit" is non-NULL. If there is no
  // such bit, returns "limit" if that is non-NULL, or else "endWord()".
  HeapWord* getNextMarkedWordAddress(const HeapWord* addr,
                                     const HeapWord* limit = NULL) const;

  // conversion utilities
  HeapWord* offsetToHeapWord(size_t offset) const {
    return _bmStartWord + (offset << _shifter);
  }
  size_t heapWordToOffset(const HeapWord* addr) const {
    return pointer_delta(addr, _bmStartWord) >> _shifter;
  }

  // The argument addr should be the start address of a valid object
  inline HeapWord* nextObject(HeapWord* addr);

  void print_on_error(outputStream* st, const char* prefix) const;

  // debugging
  NOT_PRODUCT(bool covers(MemRegion rs) const;)
};

class G1CMBitMapMappingChangedListener : public G1MappingChangedListener {
 private:
  G1CMBitMap* _bm;
 public:
  G1CMBitMapMappingChangedListener() : _bm(NULL) {}

  void set_bitmap(G1CMBitMap* bm) { _bm = bm; }

  virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
};

class G1CMBitMap : public G1CMBitMapRO {
 private:
  G1CMBitMapMappingChangedListener _listener;

 public:
  static size_t compute_size(size_t heap_size);
  // Returns the number of bytes on the heap between two marks in the bitmap.
  static size_t mark_distance();
  // Returns how many bytes (or bits) of the heap a single byte (or bit) of the
  // mark bitmap corresponds to. This is the same as the mark distance above.
  static size_t heap_map_factor() {
    return mark_distance();
  }

  G1CMBitMap() : G1CMBitMapRO(LogMinObjAlignment), _listener() { _listener.set_bitmap(this); }

  // Initializes the underlying BitMap to cover the given area.
  void initialize(MemRegion heap, G1RegionToSpaceMapper* storage);

  // Write marks.
  inline void mark(HeapWord* addr);
  inline void clear(HeapWord* addr);
  inline bool parMark(HeapWord* addr);

  void clear_range(MemRegion mr);
};
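// Illustrative sketch (not part of this header): with _shifter set to
// LogMinObjAlignment, one bitmap bit covers MinObjAlignment HeapWords, so the
// address <-> bit-offset conversions above reduce to simple shifts. Assuming
// a bitmap whose coverage starts at _bmStartWord:
//
//   HeapWord* addr   = _bmStartWord + 42 * MinObjAlignment; // some object start
//   size_t    offset = heapWordToOffset(addr);              // == 42
//   assert(offsetToHeapWord(offset) == addr, "round trip");
//
// mark_distance() / heap_map_factor() express the same ratio in bytes.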
// Represents a marking stack used by ConcurrentMarking in the G1 collector.
class G1CMMarkStack VALUE_OBJ_CLASS_SPEC {
  VirtualSpace _virtual_space;   // Underlying backing store for actual stack
  G1ConcurrentMark* _cm;
  oop* _base;        // bottom of stack
  jint _index;       // one more than last occupied index
  jint _capacity;    // max #elements
  jint _saved_index; // value of _index saved at start of GC

  bool  _overflow;
  bool  _should_expand;

 public:
  G1CMMarkStack(G1ConcurrentMark* cm);
  ~G1CMMarkStack();

  bool allocate(size_t capacity);

  // Pushes the first "n" elements of "ptr_arr" on the stack.
  // Locking impl: concurrency is allowed only with
  // "par_push_arr" and/or "par_pop_arr" operations, which use the same
  // locking strategy.
  void par_push_arr(oop* ptr_arr, int n);

  // Returns false if the array was empty. Otherwise, removes up to "max"
  // elements from the stack, and transfers them to "ptr_arr" in an
  // unspecified order. The actual number transferred is given in "n" ("n
  // == 0" is deliberately redundant with the return value.) Locking impl:
  // concurrency is allowed only with "par_push_arr" and/or "par_pop_arr"
  // operations, which use the same locking strategy.
  bool par_pop_arr(oop* ptr_arr, int max, int* n);

  bool isEmpty()  { return _index == 0; }
  int  maxElems() { return _capacity; }

  bool overflow() { return _overflow; }
  void clear_overflow() { _overflow = false; }

  bool should_expand() const { return _should_expand; }
  void set_should_expand();

  // Expand the stack, typically in response to an overflow condition.
  void expand();

  int  size() { return _index; }

  void setEmpty() { _index = 0; clear_overflow(); }

  // Record the current index.
  void note_start_of_gc();

  // Make sure that we have not added any entries to the stack during GC.
  void note_end_of_gc();

  // Apply fn to each oop in the mark stack, up to the bound recorded
  // via one of the above "note" functions. The mark stack must not
  // be modified while iterating.
  template<typename Fn> void iterate(Fn fn);
};

class YoungList;

// Root Regions are regions that are not empty at the beginning of a
// marking cycle and which we might collect during an evacuation pause
// while the cycle is active. Given that, during evacuation pauses, we
// do not copy objects that are explicitly marked, what we have to do
// for the root regions is to scan them and mark all objects reachable
// from them. According to the SATB assumptions, we only need to visit
// each object once during marking. So, as long as we finish this scan
// before the next evacuation pause, we can copy the objects from the
// root regions without having to mark them or do anything else to them.
//
// Currently, we only support root region scanning once (at the start
// of the marking cycle) and the root regions are all the survivor
// regions populated during the initial-mark pause.
class G1CMRootRegions VALUE_OBJ_CLASS_SPEC {
 private:
  YoungList*           _young_list;
  G1ConcurrentMark*    _cm;

  volatile bool        _scan_in_progress;
  volatile bool        _should_abort;
  HeapRegion* volatile _next_survivor;

  void notify_scan_done();

 public:
  G1CMRootRegions();
  // We actually do most of the initialization in this method.
  void init(G1CollectedHeap* g1h, G1ConcurrentMark* cm);

  // Reset the claiming / scanning of the root regions.
  void prepare_for_scan();

  // Forces get_next() to return NULL so that the iteration aborts early.
  void abort() { _should_abort = true; }

  // Returns true if CM threads are actively scanning root regions,
  // false otherwise.
  bool scan_in_progress() { return _scan_in_progress; }

  // Claim the next root region to scan atomically, or return NULL if
  // all have been claimed.
  HeapRegion* claim_next();

  void cancel_scan();

  // Flag that we're done with root region scanning and notify anyone
  // who's waiting on it. If aborted is false, assume that all regions
  // have been claimed.
  void scan_finished();

  // If CM threads are still scanning root regions, wait until they
  // are done. Returns true if we had to wait, false otherwise.
  bool wait_until_scan_finished();
};
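// Illustrative sketch (assumed, not part of this interface): a marking worker
// drains the root regions by repeatedly claiming and scanning one, roughly:
//
//   G1CMRootRegions* root_regions = _cm->root_regions();
//   HeapRegion* hr = root_regions->claim_next();
//   while (hr != NULL) {
//     _cm->scanRootRegion(hr, worker_id);
//     hr = root_regions->claim_next();
//   }
//
// with scan_finished() called once after all workers are done, so that an
// evacuation pause blocked in wait_until_scan_finished() can proceed.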
class ConcurrentMarkThread;

class G1ConcurrentMark: public CHeapObj<mtGC> {
  friend class ConcurrentMarkThread;
  friend class G1ParNoteEndTask;
  friend class G1VerifyLiveDataClosure;
  friend class G1CMRefProcTaskProxy;
  friend class G1CMRefProcTaskExecutor;
  friend class G1CMKeepAliveAndDrainClosure;
  friend class G1CMDrainMarkingStackClosure;
  friend class G1CMBitMapClosure;
  friend class G1CMConcurrentMarkingTask;
  friend class G1CMMarkStack;
  friend class G1CMRemarkTask;
  friend class G1CMTask;

 protected:
  ConcurrentMarkThread* _cmThread;   // The thread doing the work
  G1CollectedHeap*      _g1h;        // The heap
  uint                  _parallel_marking_threads;     // The number of marking
                                                       // threads we're using
  uint                  _max_parallel_marking_threads; // Max number of marking
                                                       // threads we'll ever use
  double                _sleep_factor; // How much we have to sleep, with
                                       // respect to the work we just did, to
                                       // meet the marking overhead goal
  double                _marking_task_overhead; // Marking target overhead for
                                                // a single task

  FreeRegionList        _cleanup_list;

  // Concurrent marking support structures
  G1CMBitMap            _markBitMap1;
  G1CMBitMap            _markBitMap2;
  G1CMBitMapRO*         _prevMarkBitMap; // Completed mark bitmap
  G1CMBitMap*           _nextMarkBitMap; // Under-construction mark bitmap

  // Liveness count data. After marking, G1 iterates over the recently gathered
  // mark bitmap and records rough liveness information on a card and region
  // basis. This information can be used for e.g. remembered set scrubbing.

  // A set bit indicates whether the given region contains any live object.
  BitMap                _region_live_bm;
  // A set bit indicates that the given card contains a live object.
  BitMap                _card_live_bm;

  // Heap bounds
  HeapWord*             _heap_start;
  HeapWord*             _heap_end;

  // Root region tracking and claiming
  G1CMRootRegions       _root_regions;

  // For gray objects
  G1CMMarkStack         _markStack; // Grey objects behind global finger
  HeapWord* volatile    _finger;    // The global finger, region aligned,
                                    // always points to the end of the
                                    // last claimed region

  // Marking tasks
  uint                    _max_worker_id; // Maximum worker id
  uint                    _active_tasks;  // Task num currently active
  G1CMTask**              _tasks;         // Task queue array (max_worker_id len)
  G1CMTaskQueueSet*       _task_queues;   // Task queue set
  ParallelTaskTerminator  _terminator;    // For termination

  // Two sync barriers that are used to synchronize tasks when an
  // overflow occurs. The algorithm is the following. All tasks enter
  // the first one to ensure that they have all stopped manipulating
  // the global data structures. After they exit it, they re-initialize
  // their data structures and task 0 re-initializes the global data
  // structures. Then, they enter the second sync barrier. This
  // ensures that no task starts doing work before all data
  // structures (local and global) have been re-initialized. When they
  // exit it, they are free to start working again.
  WorkGangBarrierSync     _first_overflow_barrier_sync;
  WorkGangBarrierSync     _second_overflow_barrier_sync;
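  // Illustrative sketch (assumed, not part of this interface) of the protocol
  // described above, as each task would run it after detecting an overflow:
  //
  //   enter_first_sync_barrier(worker_id);  // everyone stops touching global state
  //   // ... reset task-local structures; task 0 also resets the global ones ...
  //   enter_second_sync_barrier(worker_id); // nobody restarts until resets finish
  //   // ... safe to resume marking ...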
  // This is set by any task, when an overflow on the global data
  // structures is detected
  volatile bool           _has_overflown;
  // True: marking is concurrent, false: we're in remark
  volatile bool           _concurrent;
  // Set at the end of a Full GC so that marking aborts
  volatile bool           _has_aborted;

  // Used when remark aborts due to an overflow to indicate that
  // another concurrent marking phase should start
  volatile bool           _restart_for_overflow;

  // This is true from the very start of concurrent marking until the
  // point when all the tasks complete their work. It is really used
  // to determine the points between the end of concurrent marking and
  // time of remark.
  volatile bool           _concurrent_marking_in_progress;

  // There is a potential race between the ConcurrentMarkThread and the
  // VMThread (via ConcurrentMark::abort()) to call
  // ConcurrentGCTimer::register_gc_concurrent_end().
  // This variable is used to keep track of the concurrent phase.
  volatile uint           _concurrent_phase_status;
  // Concurrent phase has not yet started.
  static const uint       ConcPhaseNotStarted = 0;
  // Concurrent phase has started.
  static const uint       ConcPhaseStarted = 1;
  // The caller of ConcurrentGCTimer::register_gc_concurrent_end() is ending
  // the concurrent phase, so other threads should wait until the status
  // changes back to ConcPhaseNotStarted.
  static const uint       ConcPhaseStopping = 2;
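  // Expected transitions (an illustrative reading of the states above, not a
  // spec): register_concurrent_phase_start() moves the status from
  // ConcPhaseNotStarted to ConcPhaseStarted; the thread ending the phase moves
  // it to ConcPhaseStopping while it calls
  // ConcurrentGCTimer::register_gc_concurrent_end(), and then back to
  // ConcPhaseNotStarted, which is what any racing thread waits for.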
  // All of these times are in ms
  NumberSeq _init_times;
  NumberSeq _remark_times;
  NumberSeq _remark_mark_times;
  NumberSeq _remark_weak_ref_times;
  NumberSeq _cleanup_times;
  double    _total_counting_time;
  double    _total_rs_scrub_time;

  double*   _accum_task_vtime; // Accumulated task vtime

  WorkGang* _parallel_workers;

  void weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes);
  void weakRefsWork(bool clear_all_soft_refs);

  void swapMarkBitMaps();

  // Allocates and returns a zero-ed out "large" bitmap of the given size in bits.
  // It is always allocated using virtual memory.
  BitMap allocate_large_bitmap(BitMap::idx_t size_in_bits);
  // Allocates the memory for all bitmaps used by the concurrent marking.
  void allocate_internal_bitmaps();
  // Pre-touches the internal bitmaps.
  void pretouch_internal_bitmaps();

  // It resets the global marking data structures, as well as the
  // task local ones; should be called during initial mark.
  void reset();

  // Resets all the marking data structures. Called when we have to restart
  // marking or when marking completes (via set_non_marking_state below).
  void reset_marking_state(bool clear_overflow = true);

  // We do this after we're done with marking so that the marking data
  // structures are initialized to a sensible and predictable state.
  void set_non_marking_state();

  // Called to indicate how many threads are currently active.
  void set_concurrency(uint active_tasks);

  // It should be called to indicate which phase we're in (concurrent
  // mark or remark) and how many threads are currently active.
  void set_concurrency_and_phase(uint active_tasks, bool concurrent);

  // Prints all gathered CM-related statistics
  void print_stats();

  bool cleanup_list_is_empty() {
    return _cleanup_list.is_empty();
  }

  // Accessor methods
  uint parallel_marking_threads() const     { return _parallel_marking_threads; }
  uint max_parallel_marking_threads() const { return _max_parallel_marking_threads; }
  double sleep_factor()                     { return _sleep_factor; }
  double marking_task_overhead()            { return _marking_task_overhead; }

  HeapWord*               finger()       { return _finger; }
  bool                    concurrent()   { return _concurrent; }
  uint                    active_tasks() { return _active_tasks; }
  ParallelTaskTerminator* terminator()   { return &_terminator; }

  // It claims the next available region to be scanned by a marking
  // task/thread. It might return NULL if the next region is empty or
  // we have run out of regions. In the latter case, out_of_regions()
  // determines whether we've really run out of regions or the task
  // should call claim_region() again. This might seem a bit
  // awkward. Originally, the code was written so that claim_region()
  // either successfully returned with a non-empty region or there
  // were no more regions to be claimed. The problem with this was
  // that, in certain circumstances, it iterated over large chunks of
  // the heap finding only empty regions and, while it was working, it
  // was preventing the calling task from calling its regular clock
  // method. So, this way, each task will spend very little time in
  // claim_region() and is allowed to call the regular clock method
  // frequently.
  HeapRegion* claim_region(uint worker_id);
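  // Illustrative sketch (assumed, not part of this interface) of the claiming
  // loop this design implies for a marking task:
  //
  //   while (!out_of_regions()) {
  //     HeapRegion* hr = claim_region(worker_id);
  //     if (hr != NULL) {
  //       // ... scan hr up to its limit ...
  //     }
  //     // NULL with !out_of_regions() just means the candidate region was
  //     // empty; call the regular clock method, then try again.
  //   }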
  // It determines whether we've run out of regions to scan. Note that
  // the finger can point past the heap end in case the heap was expanded
  // to satisfy an allocation without doing a GC. This is fine, because all
  // objects in those regions will be considered live anyway because of
  // SATB guarantees (i.e. their TAMS will be equal to bottom).
  bool out_of_regions() { return _finger >= _heap_end; }

  // Returns the task with the given id
  G1CMTask* task(int id) {
    assert(0 <= id && id < (int) _active_tasks,
           "task id not within active bounds");
    return _tasks[id];
  }

  // Returns the task queue with the given id
  G1CMTaskQueue* task_queue(int id) {
    assert(0 <= id && id < (int) _active_tasks,
           "task queue id not within active bounds");
    return (G1CMTaskQueue*) _task_queues->queue(id);
  }

  // Returns the task queue set
  G1CMTaskQueueSet* task_queues() { return _task_queues; }

  // Access / manipulation of the overflow flag which is set to
  // indicate that the global stack has overflown
  bool has_overflown()        { return _has_overflown; }
  void set_has_overflown()    { _has_overflown = true; }
  void clear_has_overflown()  { _has_overflown = false; }
  bool restart_for_overflow() { return _restart_for_overflow; }

  // Methods to enter the two overflow sync barriers
  void enter_first_sync_barrier(uint worker_id);
  void enter_second_sync_barrier(uint worker_id);

  // Card index of the bottom of the G1 heap. Used for biasing indices into
  // the card bitmaps.
  intptr_t _heap_bottom_card_num;

  // Set to true when initialization is complete
  bool _completed_initialization;

  // Common code for ending a concurrent phase; if end_timer is true, the GC
  // timer is also ended after the concurrent phase.
  void register_concurrent_phase_end_common(bool end_timer);

  // Clear the given bitmap in parallel using the given WorkGang. If may_yield is
  // true, periodically insert checks to see if this method should exit prematurely.
  void clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield);
 public:
  // Manipulation of the global mark stack.
  // The push and pop operations are used by tasks for transfers
  // between task-local queues and the global mark stack, and use
  // locking for concurrency safety.
  bool mark_stack_push(oop* arr, int n) {
    _markStack.par_push_arr(arr, n);
    if (_markStack.overflow()) {
      set_has_overflown();
      return false;
    }
    return true;
  }
  void mark_stack_pop(oop* arr, int max, int* n) {
    _markStack.par_pop_arr(arr, max, n);
  }
  size_t mark_stack_size()                { return _markStack.size(); }
  size_t partial_mark_stack_size_target() { return _markStack.maxElems()/3; }
  bool mark_stack_overflow()              { return _markStack.overflow(); }
  bool mark_stack_empty()                 { return _markStack.isEmpty(); }

  G1CMRootRegions* root_regions() { return &_root_regions; }

  bool concurrent_marking_in_progress() {
    return _concurrent_marking_in_progress;
  }
  void set_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = true;
  }
  void clear_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = false;
  }

  void register_concurrent_phase_start(const char* title);
  void register_concurrent_phase_end();
  // Ends both concurrent phase and timer.
  void register_concurrent_gc_end_and_stop_timer();

  void update_accum_task_vtime(int i, double vtime) {
    _accum_task_vtime[i] += vtime;
  }

  double all_task_accum_vtime() {
    double ret = 0.0;
    for (uint i = 0; i < _max_worker_id; ++i)
      ret += _accum_task_vtime[i];
    return ret;
  }

  // Attempts to steal an object from the task queues of other tasks
  bool try_stealing(uint worker_id, int* hash_seed, oop& obj);

  G1ConcurrentMark(G1CollectedHeap* g1h,
                   G1RegionToSpaceMapper* prev_bitmap_storage,
                   G1RegionToSpaceMapper* next_bitmap_storage);
  ~G1ConcurrentMark();

  ConcurrentMarkThread* cmThread() { return _cmThread; }

  G1CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; }
  G1CMBitMap*   nextMarkBitMap() const { return _nextMarkBitMap; }

  // Returns the number of GC threads to be used in a concurrent
  // phase based on the number of GC threads being used in a STW
  // phase.
  uint scale_parallel_threads(uint n_par_threads);

  // Calculates the number of GC threads to be used in a concurrent phase.
  uint calc_parallel_marking_threads();

  // The following three are interaction between CM and
  // G1CollectedHeap

  // This notifies CM that a root during initial-mark needs to be
  // grayed. It is MT-safe. hr is the region that
  // contains the object and it's passed optionally from callers who
  // might already have it (no point in recalculating it).
  inline void grayRoot(oop obj,
                       HeapRegion* hr = NULL);

  // Prepare internal data structures for the next mark cycle. This includes clearing
  // the next mark bitmap and some internal data structures. This method is intended
  // to be called concurrently to the mutator. It will yield to safepoint requests.
  void cleanup_for_next_mark();

  // Clear the previous marking bitmap during safepoint.
  void clear_prev_bitmap(WorkGang* workers);

  // Return whether the next mark bitmap has no marks set. To be used for assertions
  // only. Will not yield to pause requests.
  bool nextMarkBitmapIsClear();

  // These two do the work that needs to be done before and after the
  // initial root checkpoint. Since this checkpoint can be done at two
  // different points (i.e. an explicit pause or piggy-backed on a
  // young collection), it's nice to be able to easily share the
  // pre/post code. It might be the case that we can put everything in
  // the post method. TP
  void checkpointRootsInitialPre();
  void checkpointRootsInitialPost();

  // Scan all the root regions and mark everything reachable from
  // them.
  void scan_root_regions();

  // Scan a single root region and mark everything reachable from it.
  void scanRootRegion(HeapRegion* hr, uint worker_id);

  // Do concurrent phase of marking, to a tentative transitive closure.
  void mark_from_roots();

  void checkpointRootsFinal(bool clear_all_soft_refs);
  void checkpointRootsFinalWork();
  void cleanup();
  void complete_cleanup();

  // Mark in the previous bitmap. NB: this is usually read-only, so use
  // this carefully!
  inline void markPrev(oop p);

  // Clears marks for all objects in the given range in the previous
  // bitmap. NB: the previous bitmap is usually read-only, so use this
  // carefully!
  void clearRangePrevBitmap(MemRegion mr);

  // Notify data structures that a GC has started.
  void note_start_of_gc() {
    _markStack.note_start_of_gc();
  }

  // Notify data structures that a GC is finished.
  void note_end_of_gc() {
    _markStack.note_end_of_gc();
  }

  // Verify that there are no CSet oops on the stacks (taskqueues /
  // global mark stack) and fingers (global / per-task).
  // If marking is not in progress, it's a no-op.
  void verify_no_cset_oops() PRODUCT_RETURN;

  inline bool isPrevMarked(oop p) const;

  inline bool do_yield_check(uint worker_i = 0);

  // Abandon current marking iteration due to a Full GC.
  void abort();

  bool has_aborted() { return _has_aborted; }

  void print_summary_info();

  void print_worker_threads_on(outputStream* st) const;

  void print_on_error(outputStream* st) const;

  // Returns the card number of the bottom of the G1 heap.
  // Used in biasing indices into accounting card bitmaps.
  intptr_t heap_bottom_card_num() const {
    return _heap_bottom_card_num;
  }

  // Returns the index in the per-card liveness count bitmap
  // for the given address
  inline BitMap::idx_t card_live_bitmap_index_for(HeapWord* addr);

  // Attempts to mark the given object on the next mark bitmap.
  inline bool par_mark(oop obj);

  // Returns true if initialization was successfully completed.
  bool completed_initialization() const {
    return _completed_initialization;
  }

 private:
  // Clear (Reset) all liveness count data.
  void clear_all_live_data(WorkGang* workers);

  // Verifies that all of the above liveness count data structures are in
  // their initial state.
  void verify_all_live_data();

  // Aggregates the per-card liveness data based on the current marking. Also sets
  // the amount of marked bytes for each region.
  void create_live_data();

  // Verification routine
  void verify_live_data();
};

// A class representing a marking task.
class G1CMTask : public TerminatorTerminator {
 private:
  enum PrivateConstants {
    // the regular clock call is called once the number of scanned
    // words reaches this limit
    words_scanned_period = 12*1024,
    // the regular clock call is called once the number of visited
    // references reaches this limit
    refs_reached_period = 384,
    // initial value for the hash seed, used in the work stealing code
    init_hash_seed = 17,
    // how many entries will be transferred between global stack and
    // local queues
    global_stack_transfer_size = 16
  };

  uint                        _worker_id;
  G1CollectedHeap*            _g1h;
  G1ConcurrentMark*           _cm;
  G1CMBitMap*                 _nextMarkBitMap;
  // the task queue of this task
  G1CMTaskQueue*              _task_queue;
 private:
  // the task queue set---needed for stealing
  G1CMTaskQueueSet*           _task_queues;
  // indicates whether the task has been claimed---this is only for
  // debugging purposes
  bool                        _claimed;

  // number of calls to this task
  int                         _calls;

  // when the virtual timer reaches this time, the marking step should
  // exit
  double                      _time_target_ms;
  // the start time of the current marking step
  double                      _start_time_ms;

  // the oop closure used for iterations over oops
  G1CMOopClosure*             _cm_oop_closure;

  // the region this task is scanning, NULL if we're not scanning any
  HeapRegion*                 _curr_region;
  // the local finger of this task, NULL if we're not scanning a region
  HeapWord*                   _finger;
  // limit of the region this task is scanning, NULL if we're not scanning one
  HeapWord*                   _region_limit;

  // the number of words this task has scanned
  size_t                      _words_scanned;
  // When _words_scanned reaches this limit, the regular clock is
  // called. Notice that this might be decreased under certain
  // circumstances (i.e. when we believe that we did an expensive
  // operation).
  size_t                      _words_scanned_limit;
  // the initial value of _words_scanned_limit (i.e. what it was
  // before it was decreased).
  size_t                      _real_words_scanned_limit;

  // the number of references this task has visited
  size_t                      _refs_reached;
  // When _refs_reached reaches this limit, the regular clock is
  // called. Notice that this might be decreased under certain
  // circumstances (i.e. when we believe that we did an expensive
  // operation).
  size_t                      _refs_reached_limit;
  // the initial value of _refs_reached_limit (i.e. what it was before
  // it was decreased).
  size_t                      _real_refs_reached_limit;

  // used by the work stealing code
  int                         _hash_seed;
  // if this is true, then the task has aborted for some reason
  bool                        _has_aborted;
  // set when the task aborts because it has met its time quota
  bool                        _has_timed_out;
  // true when we're draining SATB buffers; this avoids the task
  // aborting due to SATB buffers being available (as we're already
  // dealing with them)
  bool                        _draining_satb_buffers;

  // number sequence of past step times
  NumberSeq                   _step_times_ms;
  // elapsed time of this task
  double                      _elapsed_time_ms;
  // termination time of this task
  double                      _termination_time_ms;
  // when this task got into the termination protocol
  double                      _termination_start_time_ms;

  // true when the task is in a concurrent phase, false when it is
  // in the remark phase (so, in the latter case, we do not have to
  // check all the things that we have to check during the concurrent
  // phase, i.e. SATB buffer availability...)
  bool                        _concurrent;

  TruncatedSeq                _marking_step_diffs_ms;

  // it updates the local fields after this task has claimed
  // a new region to scan
  void setup_for_region(HeapRegion* hr);
  // it brings up-to-date the limit of the region
  void update_region_limit();

  // called when either the words scanned or the refs visited limit
  // has been reached
  void reached_limit();
  // recalculates the words scanned and refs visited limits
  void recalculate_limits();
  // decreases the words scanned and refs visited limits when we reach
  // an expensive operation
  void decrease_limits();
  // it checks whether the words scanned or refs visited reached their
  // respective limit and calls reached_limit() if they have
  void check_limits() {
    if (_words_scanned >= _words_scanned_limit ||
        _refs_reached >= _refs_reached_limit) {
      reached_limit();
    }
  }
  // this is supposed to be called regularly during a marking step as
  // it checks a bunch of conditions that might cause the marking step
  // to abort
  void regular_clock_call();
  bool concurrent() { return _concurrent; }

  // Test whether obj might have already been passed over by the
  // mark bitmap scan, and so needs to be pushed onto the mark stack.
  bool is_below_finger(oop obj, HeapWord* global_finger) const;

  template<bool scan> void process_grey_object(oop obj);

 public:
  // It resets the task; it should be called right at the beginning of
  // a marking phase.
  void reset(G1CMBitMap* _nextMarkBitMap);
  // it clears all the fields that correspond to a claimed region.
  void clear_region_fields();

  void set_concurrent(bool concurrent) { _concurrent = concurrent; }

  // The main method of this class which performs a marking step
  // trying not to exceed the given duration. However, it might exit
  // prematurely, according to some conditions (i.e. SATB buffers are
  // available for processing).
  void do_marking_step(double target_ms,
                       bool do_termination,
                       bool is_serial);

  // These two calls start and stop the timer
  void record_start_time() {
    _elapsed_time_ms = os::elapsedTime() * 1000.0;
  }
  void record_end_time() {
    _elapsed_time_ms = os::elapsedTime() * 1000.0 - _elapsed_time_ms;
  }

  // returns the worker ID associated with this task.
  uint worker_id() { return _worker_id; }
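  // Illustrative sketch (assumed, not part of this interface): a concurrent
  // marking worker driving this task would look roughly like
  //
  //   task->record_start_time();
  //   do {
  //     task->do_marking_step(10.0 /* target_ms, assumed value */,
  //                           true /* do_termination */,
  //                           false /* is_serial */);
  //     // ... yield / sleep handling between steps elided ...
  //   } while (task->has_aborted() && !_cm->has_aborted());
  //   task->record_end_time();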
  // From TerminatorTerminator. It determines whether this task should
  // exit the termination protocol after it's entered it.
  virtual bool should_exit_termination();

  // Resets the local region fields after a task has finished scanning a
  // region; or when they have become stale as a result of the region
  // being evacuated.
  void giveup_current_region();

  HeapWord* finger() { return _finger; }

  bool has_aborted()       { return _has_aborted; }
  void set_has_aborted()   { _has_aborted = true; }
  void clear_has_aborted() { _has_aborted = false; }
  bool has_timed_out()     { return _has_timed_out; }
  bool claimed()           { return _claimed; }

  void set_cm_oop_closure(G1CMOopClosure* cm_oop_closure);

  // Increment the number of references this task has visited.
  void increment_refs_reached() { ++_refs_reached; }

  // Grey the object by marking it. If not already marked, push it on
  // the local queue if below the finger.
  // Precondition: obj is below region's NTAMS.
  inline void make_reference_grey(oop obj);

  // Grey the object (by calling make_reference_grey) if required,
  // e.g. obj is below its containing region's NTAMS.
  // Precondition: obj is a valid heap object.
  inline void deal_with_reference(oop obj);

  // It scans an object and visits its children.
  inline void scan_object(oop obj);

  // It pushes an object on the local queue.
  inline void push(oop obj);

  // These two move entries to/from the global stack.
  void move_entries_to_global_stack();
  void get_entries_from_global_stack();

  // It pops and scans objects from the local queue. If partially is
  // true, then it stops when the queue size reaches a given limit. If
  // partially is false, then it stops when the queue is empty.
  void drain_local_queue(bool partially);
  // It moves entries from the global stack to the local queue and
  // drains the local queue. If partially is true, then it stops when
  // both the global stack and the local queue reach a given size. If
  // partially is false, it tries to empty them totally.
  void drain_global_stack(bool partially);
  // It keeps picking SATB buffers and processing them until no SATB
  // buffers are available.
  void drain_satb_buffers();

  // moves the local finger to a new location
  inline void move_finger_to(HeapWord* new_finger) {
    assert(new_finger >= _finger && new_finger < _region_limit, "invariant");
    _finger = new_finger;
  }

  G1CMTask(uint worker_id,
           G1ConcurrentMark *cm,
           G1CMTaskQueue* task_queue,
           G1CMTaskQueueSet* task_queues);

  // it prints statistics associated with this task
  void print_stats();
};

// Class that's used to print out per-region liveness
// information. It's currently used at the end of marking and also
// after we sort the old regions at the end of the cleanup operation.
class G1PrintRegionLivenessInfoClosure: public HeapRegionClosure {
 private:
  // Accumulators for these values.
  size_t _total_used_bytes;
  size_t _total_capacity_bytes;
  size_t _total_prev_live_bytes;
  size_t _total_next_live_bytes;
  // These are set up when we come across a "starts humongous" region
  // (as this is where most of this information is stored, not in the
  // subsequent "continues humongous" regions). After that, for every
  // region in a given humongous region series we deduce the right
  // values for it by simply subtracting the appropriate amount from
  // these fields. All these values should reach 0 after we've visited
  // the last region in the series.
  size_t _hum_used_bytes;
  size_t _hum_capacity_bytes;
  size_t _hum_prev_live_bytes;
  size_t _hum_next_live_bytes;

  // Accumulator for the remembered set size
  size_t _total_remset_bytes;

  // Accumulator for strong code roots memory size
  size_t _total_strong_code_roots_bytes;

  static double perc(size_t val, size_t total) {
    if (total == 0) {
      return 0.0;
    } else {
      return 100.0 * ((double) val / (double) total);
    }
  }

  static double bytes_to_mb(size_t val) {
    return (double) val / (double) M;
  }

  // See the .cpp file.
  size_t get_hum_bytes(size_t* hum_bytes);
  void get_hum_bytes(size_t* used_bytes, size_t* capacity_bytes,
                     size_t* prev_live_bytes, size_t* next_live_bytes);

 public:
  // The header and footer are printed in the constructor and
  // destructor respectively.
  G1PrintRegionLivenessInfoClosure(const char* phase_name);
  virtual bool doHeapRegion(HeapRegion* r);
  ~G1PrintRegionLivenessInfoClosure();
};

#endif // SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP