/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP
#define SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP

#include "classfile/javaClasses.hpp"
#include "gc/g1/g1RegionToSpaceMapper.hpp"
#include "gc/g1/heapRegionSet.hpp"
#include "gc/shared/taskqueue.hpp"

class G1CollectedHeap;
class G1CMBitMap;
class G1CMTask;
class G1ConcurrentMark;
class ConcurrentGCTimer;
class G1OldTracer;
class G1SurvivorRegions;
typedef GenericTaskQueue<oop, mtGC> G1CMTaskQueue;
typedef GenericTaskQueueSet<G1CMTaskQueue, mtGC> G1CMTaskQueueSet;

// Closure used by CM during concurrent reference discovery
// and reference processing (during remarking) to determine
// if a particular object is alive. It is primarily used
// to determine if referents of discovered reference objects
// are alive. An instance is also embedded into the
// reference processor as the _is_alive_non_header field.
class G1CMIsAliveClosure: public BoolObjectClosure {
  G1CollectedHeap* _g1;
 public:
  G1CMIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) { }

  bool do_object_b(oop obj);
};

// A generic CM bit map. This is essentially a wrapper around the BitMap
// class, with one bit per (1 << _shifter) HeapWords.

class G1CMBitMapRO VALUE_OBJ_CLASS_SPEC {
 protected:
  HeapWord*  _bmStartWord; // base address of range covered by map
  size_t     _bmWordSize;  // map size (in #HeapWords covered)
  const int  _shifter;     // map to char or bit
  BitMapView _bm;          // the bit map itself

 public:
  // constructor
  G1CMBitMapRO(int shifter);

  // inquiries
  HeapWord* startWord() const { return _bmStartWord; }
  // the following is one past the last word in space
  HeapWord* endWord()   const { return _bmStartWord + _bmWordSize; }

  // read marks

  bool isMarked(HeapWord* addr) const {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    return _bm.at(heapWordToOffset(addr));
  }

  // iteration
  inline bool iterate(BitMapClosure* cl, MemRegion mr);

  // Return the address corresponding to the next marked bit at or after
  // "addr", and before "limit", if "limit" is non-NULL. If there is no
  // such bit, returns "limit" if that is non-NULL, or else "endWord()".
  HeapWord* getNextMarkedWordAddress(const HeapWord* addr,
                                     const HeapWord* limit = NULL) const;
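
  // Illustrative sketch (not part of this interface): walking the live objects
  // in a range [start, end) typically combines getNextMarkedWordAddress() with
  // nextObject() below, assuming nextObject() returns the address just past
  // the given object:
  //
  //   HeapWord* cur = bm->getNextMarkedWordAddress(start, end);
  //   while (cur < end) {
  //     oop obj = oop(cur);
  //     // ... process the live object ...
  //     cur = bm->getNextMarkedWordAddress(bm->nextObject(cur), end);
  //   }
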
  // conversion utilities
  HeapWord* offsetToHeapWord(size_t offset) const {
    return _bmStartWord + (offset << _shifter);
  }
  size_t heapWordToOffset(const HeapWord* addr) const {
    return pointer_delta(addr, _bmStartWord) >> _shifter;
  }

  // The argument addr should be the start address of a valid object
  inline HeapWord* nextObject(HeapWord* addr);

  void print_on_error(outputStream* st, const char* prefix) const;

  // debugging
  NOT_PRODUCT(bool covers(MemRegion rs) const;)
};

class G1CMBitMapMappingChangedListener : public G1MappingChangedListener {
 private:
  G1CMBitMap* _bm;
 public:
  G1CMBitMapMappingChangedListener() : _bm(NULL) {}

  void set_bitmap(G1CMBitMap* bm) { _bm = bm; }

  virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
};

class G1CMBitMap : public G1CMBitMapRO {
 private:
  G1CMBitMapMappingChangedListener _listener;

 public:
  static size_t compute_size(size_t heap_size);
  // Returns the number of bytes on the heap between two marks in the bitmap.
  static size_t mark_distance();
  // Returns how many bytes (or bits) of the heap a single byte (or bit) of the
  // mark bitmap corresponds to. This is the same as the mark distance above.
  static size_t heap_map_factor() {
    return mark_distance();
  }

  G1CMBitMap() : G1CMBitMapRO(LogMinObjAlignment), _listener() { _listener.set_bitmap(this); }

  // Initializes the underlying BitMap to cover the given area.
  void initialize(MemRegion heap, G1RegionToSpaceMapper* storage);

  // Write marks.
  inline void mark(HeapWord* addr);
  inline void clear(HeapWord* addr);
  inline bool parMark(HeapWord* addr);

  void clear_range(MemRegion mr);
};

// Represents the overflow mark stack used by concurrent marking.
//
// Stores oops in a huge buffer in virtual memory that is always fully committed.
// Resizing may only happen during a STW pause when the stack is empty.
class G1CMMarkStack VALUE_OBJ_CLASS_SPEC {
  ReservedSpace _reserved_space; // Space currently reserved for the mark stack.

  oop*   _base;        // Bottom address of allocated memory area.
  size_t _capacity;    // Maximum number of elements.
  size_t _index;       // One more than last occupied index.

  size_t _saved_index; // Value of _index saved at start of GC to detect mark stack modifications during that time.

  bool _overflow;
  bool _should_expand;

  // Resizes the mark stack to the given new capacity. Releases any previous
  // memory if successful.
  bool resize(size_t new_capacity);

  bool stack_modified() const { return _index != _saved_index; }
 public:
  G1CMMarkStack();
  ~G1CMMarkStack();

  bool allocate(size_t capacity);

  // Pushes the first "n" elements of the given buffer on the stack.
  void par_push_arr(oop* buffer, size_t n);

  // Moves up to "max" elements from the stack into the given buffer, storing
  // the number of elements moved in *n. Returns false if the stack was empty,
  // true if the buffer contains at least one element.
  bool par_pop_arr(oop* buffer, size_t max, size_t* n);
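
  // Illustrative sketch (not part of this interface): a marking task refilling
  // a local buffer from this stack might use the bulk operations above roughly
  // as follows (the buffer and transfer size are the caller's):
  //
  //   oop buffer[transfer_size];
  //   size_t n = 0;
  //   if (stack->par_pop_arr(buffer, transfer_size, &n)) {
  //     for (size_t i = 0; i < n; ++i) {
  //       // push buffer[i] onto the task-local queue
  //     }
  //   }
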
  bool is_empty() const { return _index == 0; }
  size_t capacity() const { return _capacity; }

  bool overflow() const { return _overflow; }
  void clear_overflow() { _overflow = false; }

  bool should_expand() const { return _should_expand; }
  void set_should_expand(bool value) { _should_expand = value; }

  // Expand the stack, typically in response to an overflow condition.
  void expand();

  size_t size() const { return _index; }

  void set_empty() { _index = 0; clear_overflow(); }

  // Record the current index.
  void note_start_of_gc();

  // Make sure that we have not added any entries to the stack during GC.
  void note_end_of_gc();

  // Apply fn to each oop in the mark stack, up to the bound recorded
  // via one of the above "note" functions. The mark stack must not
  // be modified while iterating.
  template<typename Fn> void iterate(Fn fn);
};

// Root Regions are regions that are not empty at the beginning of a
// marking cycle and which we might collect during an evacuation pause
// while the cycle is active. Given that, during evacuation pauses, we
// do not copy objects that are explicitly marked, what we have to do
// for the root regions is to scan them and mark all objects reachable
// from them. According to the SATB assumptions, we only need to visit
// each object once during marking. So, as long as we finish this scan
// before the next evacuation pause, we can copy the objects from the
// root regions without having to mark them or do anything else to them.
//
// Currently, we only support root region scanning once (at the start
// of the marking cycle) and the root regions are all the survivor
// regions populated during the initial-mark pause.
class G1CMRootRegions VALUE_OBJ_CLASS_SPEC {
 private:
  const G1SurvivorRegions* _survivors;
  G1ConcurrentMark*        _cm;

  volatile bool            _scan_in_progress;
  volatile bool            _should_abort;
  volatile int             _claimed_survivor_index;

  void notify_scan_done();

 public:
  G1CMRootRegions();
  // We actually do most of the initialization in this method.
  void init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm);

  // Reset the claiming / scanning of the root regions.
  void prepare_for_scan();

  // Forces get_next() to return NULL so that the iteration aborts early.
  void abort() { _should_abort = true; }

  // Return true if the CM thread is actively scanning root regions,
  // false otherwise.
  bool scan_in_progress() { return _scan_in_progress; }

  // Claim the next root region to scan atomically, or return NULL if
  // all have been claimed.
  HeapRegion* claim_next();

  // The number of root regions to scan.
  uint num_root_regions() const;

  void cancel_scan();

  // Flag that we're done with root region scanning and notify anyone
  // who's waiting on it. If aborted is false, assume that all regions
  // have been claimed.
  void scan_finished();

  // If CM threads are still scanning root regions, wait until they
  // are done. Return true if we had to wait, false otherwise.
  bool wait_until_scan_finished();
};
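
// Illustrative sketch (not part of this interface): a concurrent marking worker
// drains the root regions by repeatedly claiming one until none remain; the
// setup and teardown (prepare_for_scan(), scan_finished()) are driven elsewhere:
//
//   G1CMRootRegions* root_regions = _cm->root_regions();
//   HeapRegion* hr = root_regions->claim_next();
//   while (hr != NULL) {
//     _cm->scanRootRegion(hr);
//     hr = root_regions->claim_next();
//   }
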
class ConcurrentMarkThread;

class G1ConcurrentMark: public CHeapObj<mtGC> {
  friend class ConcurrentMarkThread;
  friend class G1ParNoteEndTask;
  friend class G1VerifyLiveDataClosure;
  friend class G1CMRefProcTaskProxy;
  friend class G1CMRefProcTaskExecutor;
  friend class G1CMKeepAliveAndDrainClosure;
  friend class G1CMDrainMarkingStackClosure;
  friend class G1CMBitMapClosure;
  friend class G1CMConcurrentMarkingTask;
  friend class G1CMMarkStack;
  friend class G1CMRemarkTask;
  friend class G1CMTask;

 protected:
  ConcurrentMarkThread* _cmThread;   // The thread doing the work
  G1CollectedHeap*      _g1h;        // The heap
  uint                  _parallel_marking_threads;     // The number of marking
                                                       // threads we're using
  uint                  _max_parallel_marking_threads; // Max number of marking
                                                       // threads we'll ever use
  double                _sleep_factor; // How much we have to sleep, with
                                       // respect to the work we just did, to
                                       // meet the marking overhead goal
  double                _marking_task_overhead; // Marking target overhead for
                                                // a single task

  FreeRegionList        _cleanup_list;

  // Concurrent marking support structures
  G1CMBitMap            _markBitMap1;
  G1CMBitMap            _markBitMap2;
  G1CMBitMapRO*         _prevMarkBitMap; // Completed mark bitmap
  G1CMBitMap*           _nextMarkBitMap; // Under-construction mark bitmap

  // Heap bounds
  HeapWord*             _heap_start;
  HeapWord*             _heap_end;

  // Root region tracking and claiming
  G1CMRootRegions       _root_regions;

  // For gray objects
  G1CMMarkStack         _global_mark_stack; // Grey objects behind global finger
  HeapWord* volatile    _finger;            // The global finger, region aligned,
                                            // always points to the end of the
                                            // last claimed region

  // Marking tasks
  uint                    _max_worker_id; // Maximum worker id
  uint                    _active_tasks;  // Number of tasks currently active
  G1CMTask**              _tasks;         // Task array (max_worker_id length)
  G1CMTaskQueueSet*       _task_queues;   // Task queue set
  ParallelTaskTerminator  _terminator;    // For termination

  // Two sync barriers that are used to synchronize tasks when an
  // overflow occurs. The algorithm is the following. All tasks enter
  // the first one to ensure that they have all stopped manipulating
  // the global data structures. After they exit it, they re-initialize
  // their data structures and task 0 re-initializes the global data
  // structures. Then, they enter the second sync barrier. This
  // ensures that no task starts doing work before all data
  // structures (local and global) have been re-initialized. When they
  // exit it, they are free to start working again.
  WorkGangBarrierSync     _first_overflow_barrier_sync;
  WorkGangBarrierSync     _second_overflow_barrier_sync;

  // This is set by any task when an overflow on the global data
  // structures is detected.
  volatile bool           _has_overflown;
  // True: marking is concurrent, false: we're in remark
  volatile bool           _concurrent;
  // Set at the end of a Full GC so that marking aborts
  volatile bool           _has_aborted;

  // Used when remark aborts due to an overflow to indicate that
  // another concurrent marking phase should start
  volatile bool           _restart_for_overflow;
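
  // Illustrative sketch (not part of this interface): the overflow protocol
  // described above, as seen from a single marking task; task 0 is the one
  // that re-initializes the global data structures between the two barriers:
  //
  //   if (_cm->has_overflown()) {
  //     _cm->enter_first_sync_barrier(_worker_id);   // stop touching global state
  //     // every task resets its local structures; task 0 also resets the
  //     // global ones (see reset_marking_state())
  //     _cm->enter_second_sync_barrier(_worker_id);  // nobody resumes early
  //   }
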
  // This is true from the very start of concurrent marking until the
  // point when all the tasks complete their work. It is really used
  // to determine the points between the end of concurrent marking and
  // the time of remark.
  volatile bool           _concurrent_marking_in_progress;

  ConcurrentGCTimer*      _gc_timer_cm;

  G1OldTracer*            _gc_tracer_cm;

  // All of these times are in ms
  NumberSeq _init_times;
  NumberSeq _remark_times;
  NumberSeq _remark_mark_times;
  NumberSeq _remark_weak_ref_times;
  NumberSeq _cleanup_times;
  double    _total_counting_time;
  double    _total_rs_scrub_time;

  double*   _accum_task_vtime;   // Accumulated task vtime

  WorkGang* _parallel_workers;

  void weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes);
  void weakRefsWork(bool clear_all_soft_refs);

  void swapMarkBitMaps();

  // It resets the global marking data structures, as well as the
  // task local ones; should be called during initial mark.
  void reset();

  // Resets all the marking data structures. Called when we have to restart
  // marking or when marking completes (via set_non_marking_state below).
  void reset_marking_state(bool clear_overflow = true);

  // We do this after we're done with marking so that the marking data
  // structures are initialized to a sensible and predictable state.
  void set_non_marking_state();

  // Called to indicate how many threads are currently active.
  void set_concurrency(uint active_tasks);

  // It should be called to indicate which phase we're in (concurrent
  // mark or remark) and how many threads are currently active.
  void set_concurrency_and_phase(uint active_tasks, bool concurrent);

  // Prints all gathered CM-related statistics.
  void print_stats();

  bool cleanup_list_is_empty() {
    return _cleanup_list.is_empty();
  }

  // Accessor methods
  uint parallel_marking_threads() const     { return _parallel_marking_threads; }
  uint max_parallel_marking_threads() const { return _max_parallel_marking_threads; }
  double sleep_factor()                     { return _sleep_factor; }
  double marking_task_overhead()            { return _marking_task_overhead; }

  HeapWord*               finger()       { return _finger; }
  bool                    concurrent()   { return _concurrent; }
  uint                    active_tasks() { return _active_tasks; }
  ParallelTaskTerminator* terminator()   { return &_terminator; }

  // It claims the next available region to be scanned by a marking
  // task/thread. It might return NULL if the next region is empty or
  // we have run out of regions. In the latter case, out_of_regions()
  // determines whether we've really run out of regions or the task
  // should call claim_region() again. This might seem a bit
  // awkward. Originally, the code was written so that claim_region()
  // either successfully returned with a non-empty region or there
  // were no more regions to be claimed. The problem with this was
  // that, in certain circumstances, it iterated over large chunks of
  // the heap finding only empty regions and, while it was working, it
  // was preventing the calling task from calling its regular clock
  // method. So, this way, each task will spend very little time in
  // claim_region() and is allowed to call the regular clock method
  // frequently.
  HeapRegion* claim_region(uint worker_id);
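
  // Illustrative sketch (not part of this interface) of the claiming loop that
  // the comment above describes, from a marking task's point of view:
  //
  //   HeapRegion* hr = _cm->claim_region(_worker_id);
  //   while (hr == NULL && !_cm->out_of_regions()) {
  //     regular_clock_call();   // stay responsive between claim attempts
  //     hr = _cm->claim_region(_worker_id);
  //   }
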
  // It determines whether we've run out of regions to scan. Note that
  // the finger can point past the heap end in case the heap was expanded
  // to satisfy an allocation without doing a GC. This is fine, because all
  // objects in those regions will be considered live anyway because of
  // SATB guarantees (i.e. their TAMS will be equal to bottom).
  bool out_of_regions() { return _finger >= _heap_end; }

  // Returns the task with the given id
  G1CMTask* task(int id) {
    assert(0 <= id && id < (int) _active_tasks,
           "task id not within active bounds");
    return _tasks[id];
  }

  // Returns the task queue with the given id
  G1CMTaskQueue* task_queue(int id) {
    assert(0 <= id && id < (int) _active_tasks,
           "task queue id not within active bounds");
    return (G1CMTaskQueue*) _task_queues->queue(id);
  }

  // Returns the task queue set
  G1CMTaskQueueSet* task_queues() { return _task_queues; }

  // Access / manipulation of the overflow flag which is set to
  // indicate that the global stack has overflown
  bool has_overflown()        { return _has_overflown; }
  void set_has_overflown()    { _has_overflown = true; }
  void clear_has_overflown()  { _has_overflown = false; }
  bool restart_for_overflow() { return _restart_for_overflow; }

  // Methods to enter the two overflow sync barriers
  void enter_first_sync_barrier(uint worker_id);
  void enter_second_sync_barrier(uint worker_id);

  // Card index of the bottom of the G1 heap. Used for biasing indices into
  // the card bitmaps.
  intptr_t _heap_bottom_card_num;

  // Set to true when initialization is complete
  bool _completed_initialization;

  // end_timer, true to end gc timer after ending concurrent phase.
  void register_concurrent_phase_end_common(bool end_timer);

  // Clear the given bitmap in parallel using the given WorkGang. If may_yield is
  // true, periodically insert checks to see if this method should exit prematurely.
  void clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield);
 public:
  // Manipulation of the global mark stack.
  // The push and pop operations are used by tasks for transfers
  // between task-local queues and the global mark stack, and use
  // locking for concurrency safety.
  bool mark_stack_push(oop* arr, size_t n) {
    _global_mark_stack.par_push_arr(arr, n);
    if (_global_mark_stack.overflow()) {
      set_has_overflown();
      return false;
    }
    return true;
  }
  void mark_stack_pop(oop* arr, size_t max, size_t* n) {
    _global_mark_stack.par_pop_arr(arr, max, n);
  }
  size_t mark_stack_size()                { return _global_mark_stack.size(); }
  size_t partial_mark_stack_size_target() { return _global_mark_stack.capacity() / 3; }
  bool mark_stack_overflow()              { return _global_mark_stack.overflow(); }
  bool mark_stack_empty()                 { return _global_mark_stack.is_empty(); }

  G1CMRootRegions* root_regions() { return &_root_regions; }

  bool concurrent_marking_in_progress() {
    return _concurrent_marking_in_progress;
  }
  void set_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = true;
  }
  void clear_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = false;
  }

  void concurrent_cycle_start();
  void concurrent_cycle_end();

  void update_accum_task_vtime(int i, double vtime) {
    _accum_task_vtime[i] += vtime;
  }

  double all_task_accum_vtime() {
    double ret = 0.0;
    for (uint i = 0; i < _max_worker_id; ++i)
      ret += _accum_task_vtime[i];
    return ret;
  }

  // Attempts to steal an object from the task queues of other tasks
  bool try_stealing(uint worker_id, int* hash_seed, oop& obj);

  G1ConcurrentMark(G1CollectedHeap* g1h,
                   G1RegionToSpaceMapper* prev_bitmap_storage,
                   G1RegionToSpaceMapper* next_bitmap_storage);
  ~G1ConcurrentMark();

  ConcurrentMarkThread* cmThread() { return _cmThread; }

  G1CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; }
  G1CMBitMap*   nextMarkBitMap() const { return _nextMarkBitMap; }

  // Returns the number of GC threads to be used in a concurrent
  // phase based on the number of GC threads being used in a STW
  // phase.
  uint scale_parallel_threads(uint n_par_threads);

  // Calculates the number of GC threads to be used in a concurrent phase.
  uint calc_parallel_marking_threads();

  // The following three are interactions between CM and
  // G1CollectedHeap.

  // This notifies CM that a root during initial-mark needs to be
  // grayed. It is MT-safe. hr is the region that
  // contains the object and is passed optionally from callers who
  // might already have it (no point in recalculating it).
  inline void grayRoot(oop obj,
                       HeapRegion* hr = NULL);

  // Prepare internal data structures for the next mark cycle. This includes clearing
  // the next mark bitmap and some internal data structures. This method is intended
  // to be called concurrently to the mutator. It will yield to safepoint requests.
  void cleanup_for_next_mark();

  // Clear the previous marking bitmap during safepoint.
  void clear_prev_bitmap(WorkGang* workers);

  // Return whether the next mark bitmap has no marks set. To be used for assertions
  // only. Will not yield to pause requests.
  bool nextMarkBitmapIsClear();

  // These two do the work that needs to be done before and after the
  // initial root checkpoint. Since this checkpoint can be done at two
  // different points (i.e. an explicit pause or piggy-backed on a
  // young collection), it's nice to be able to easily share the
  // pre/post code. It might be the case that we can put everything in
  // the post method.
  void checkpointRootsInitialPre();
  void checkpointRootsInitialPost();
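
  // Orientation sketch (not part of this interface): a full marking cycle drives
  // these entry points roughly in the order below; the exact call sites and pause
  // boundaries live in ConcurrentMarkThread and G1CollectedHeap:
  //
  //   cm->checkpointRootsInitialPre();   // inside the initial-mark pause
  //   cm->checkpointRootsInitialPost();
  //   cm->scan_root_regions();           // concurrent
  //   cm->mark_from_roots();             // concurrent
  //   cm->checkpointRootsFinal(false /* clear_all_soft_refs */);  // remark pause
  //   cm->cleanup();                     // cleanup pause
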
  // Scan all the root regions and mark everything reachable from
  // them.
  void scan_root_regions();

  // Scan a single root region and mark everything reachable from it.
  void scanRootRegion(HeapRegion* hr);

  // Do concurrent phase of marking, to a tentative transitive closure.
  void mark_from_roots();

  void checkpointRootsFinal(bool clear_all_soft_refs);
  void checkpointRootsFinalWork();
  void cleanup();
  void complete_cleanup();

  // Mark in the previous bitmap. NB: this is usually read-only, so use
  // this carefully!
  inline void markPrev(oop p);

  // Clears marks for all objects in the given range, for the prev or
  // next bitmaps. NB: the previous bitmap is usually
  // read-only, so use this carefully!
  void clearRangePrevBitmap(MemRegion mr);

  // Notify data structures that a GC has started.
  void note_start_of_gc() {
    _global_mark_stack.note_start_of_gc();
  }

  // Notify data structures that a GC is finished.
  void note_end_of_gc() {
    _global_mark_stack.note_end_of_gc();
  }

  // Verify that there are no CSet oops on the stacks (taskqueues /
  // global mark stack) and fingers (global / per-task).
  // If marking is not in progress, it's a no-op.
  void verify_no_cset_oops() PRODUCT_RETURN;

  inline bool isPrevMarked(oop p) const;

  inline bool do_yield_check();

  // Abandon current marking iteration due to a Full GC.
  void abort();

  bool has_aborted() { return _has_aborted; }

  void print_summary_info();

  void print_worker_threads_on(outputStream* st) const;
  void threads_do(ThreadClosure* tc) const;

  void print_on_error(outputStream* st) const;

  // Attempts to mark the given object on the next mark bitmap.
  inline bool par_mark(oop obj);

  // Returns true if initialization was successfully completed.
  bool completed_initialization() const {
    return _completed_initialization;
  }

  ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; }
  G1OldTracer* gc_tracer_cm() const      { return _gc_tracer_cm; }

 private:
  // Clear (Reset) all liveness count data.
  void clear_live_data(WorkGang* workers);

#ifdef ASSERT
  // Verify that all of the above data structures are in their initial state.
  void verify_live_data_clear();
#endif

  // Aggregates the per-card liveness data based on the current marking. Also sets
  // the number of marked bytes for each region.
  void create_live_data();

  void finalize_live_data();

  void verify_live_data();
};

// A class representing a marking task.
class G1CMTask : public TerminatorTerminator {
 private:
  enum PrivateConstants {
    // The regular clock call is called once the number of scanned words
    // reaches this limit
    words_scanned_period       = 12*1024,
    // The regular clock call is called once the number of visited
    // references reaches this limit
    refs_reached_period        = 384,
    // Initial value for the hash seed, used in the work stealing code
    init_hash_seed             = 17,
    // How many entries will be transferred between global stack and
    // local queues at once.
    global_stack_transfer_size = 1024
  };
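
  // Note: with the constants above, a task re-checks its marking clock after at
  // most 12*1024 = 12288 scanned words or 384 visited references, whichever
  // limit is reached first; see check_limits() below, which compares
  // _words_scanned and _refs_reached against the corresponding limits.
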
  uint                        _worker_id;
  G1CollectedHeap*            _g1h;
  G1ConcurrentMark*           _cm;
  G1CMBitMap*                 _nextMarkBitMap;
  // the task queue of this task
  G1CMTaskQueue*              _task_queue;
 private:
  // the task queue set---needed for stealing
  G1CMTaskQueueSet*           _task_queues;
  // indicates whether the task has been claimed---this is only for
  // debugging purposes
  bool                        _claimed;

  // number of calls to this task
  int                         _calls;

  // when the virtual timer reaches this time, the marking step should
  // exit
  double                      _time_target_ms;
  // the start time of the current marking step
  double                      _start_time_ms;

  // the oop closure used for iterations over oops
  G1CMOopClosure*             _cm_oop_closure;

  // the region this task is scanning, NULL if we're not scanning any
  HeapRegion*                 _curr_region;
  // the local finger of this task, NULL if we're not scanning a region
  HeapWord*                   _finger;
  // limit of the region this task is scanning, NULL if we're not scanning one
  HeapWord*                   _region_limit;

  // the number of words this task has scanned
  size_t                      _words_scanned;
  // When _words_scanned reaches this limit, the regular clock is
  // called. Notice that this might be decreased under certain
  // circumstances (i.e. when we believe that we did an expensive
  // operation).
  size_t                      _words_scanned_limit;
  // the initial value of _words_scanned_limit (i.e. what it was
  // before it was decreased).
  size_t                      _real_words_scanned_limit;

  // the number of references this task has visited
  size_t                      _refs_reached;
  // When _refs_reached reaches this limit, the regular clock is
  // called. Notice that this might be decreased under certain
  // circumstances (i.e. when we believe that we did an expensive
  // operation).
  size_t                      _refs_reached_limit;
  // the initial value of _refs_reached_limit (i.e. what it was before
  // it was decreased).
  size_t                      _real_refs_reached_limit;

  // used by the work stealing code
  int                         _hash_seed;
  // if this is true, then the task has aborted for some reason
  bool                        _has_aborted;
  // set when the task aborts because it has met its time quota
  bool                        _has_timed_out;
  // true when we're draining SATB buffers; this avoids the task
  // aborting due to SATB buffers being available (as we're already
  // dealing with them)
  bool                        _draining_satb_buffers;

  // number sequence of past step times
  NumberSeq                   _step_times_ms;
  // elapsed time of this task
  double                      _elapsed_time_ms;
  // termination time of this task
  double                      _termination_time_ms;
  // when this task got into the termination protocol
  double                      _termination_start_time_ms;

  // true when the task is during a concurrent phase, false when it is
  // in the remark phase (so, in the latter case, we do not have to
  // check all the things that we have to check during the concurrent
  // phase, i.e. SATB buffer availability...)
  bool                        _concurrent;

  TruncatedSeq                _marking_step_diffs_ms;

  // it updates the local fields after this task has claimed
  // a new region to scan
  void setup_for_region(HeapRegion* hr);
  // it brings the limit of the region up to date
  void update_region_limit();

  // called when either the words scanned or the refs visited limit
  // has been reached
  void reached_limit();
  // recalculates the words scanned and refs visited limits
  void recalculate_limits();
  // decreases the words scanned and refs visited limits when we reach
  // an expensive operation
  void decrease_limits();
  // it checks whether the words scanned or refs visited reached their
  // respective limit and calls reached_limit() if they have
  void check_limits() {
    if (_words_scanned >= _words_scanned_limit ||
        _refs_reached >= _refs_reached_limit) {
      reached_limit();
    }
  }
  // this is supposed to be called regularly during a marking step as
  // it checks a bunch of conditions that might cause the marking step
  // to abort
  void regular_clock_call();
  bool concurrent() { return _concurrent; }

  // Test whether obj might have already been passed over by the
  // mark bitmap scan, and so needs to be pushed onto the mark stack.
  bool is_below_finger(oop obj, HeapWord* global_finger) const;

  template<bool scan> void process_grey_object(oop obj);

 public:
  // It resets the task; it should be called right at the beginning of
  // a marking phase.
  void reset(G1CMBitMap* _nextMarkBitMap);
  // it clears all the fields that correspond to a claimed region.
  void clear_region_fields();

  void set_concurrent(bool concurrent) { _concurrent = concurrent; }

  // The main method of this class which performs a marking step
  // trying not to exceed the given duration. However, it might exit
  // prematurely, according to some conditions (i.e. SATB buffers are
  // available for processing).
  void do_marking_step(double target_ms,
                       bool do_termination,
                       bool is_serial);

  // These two calls start and stop the timer
  void record_start_time() {
    _elapsed_time_ms = os::elapsedTime() * 1000.0;
  }
  void record_end_time() {
    _elapsed_time_ms = os::elapsedTime() * 1000.0 - _elapsed_time_ms;
  }

  // returns the worker ID associated with this task.
  uint worker_id() { return _worker_id; }

  // From TerminatorTerminator. It determines whether this task should
  // exit the termination protocol after it's entered it.
  virtual bool should_exit_termination();

  // Resets the local region fields after a task has finished scanning a
  // region; or when they have become stale as a result of the region
  // being evacuated.
  void giveup_current_region();

  HeapWord* finger() { return _finger; }

  bool has_aborted()       { return _has_aborted; }
  void set_has_aborted()   { _has_aborted = true; }
  void clear_has_aborted() { _has_aborted = false; }
  bool has_timed_out()     { return _has_timed_out; }
  bool claimed()           { return _claimed; }

  void set_cm_oop_closure(G1CMOopClosure* cm_oop_closure);

  // Increment the number of references this task has visited.
  void increment_refs_reached() { ++_refs_reached; }

  // Grey the object by marking it. If not already marked, push it on
  // the local queue if below the finger.
  // obj is below its region's NTAMS.
  inline void make_reference_grey(oop obj);

  // Grey the object (by calling make_reference_grey) if required,
  // e.g. obj is below its containing region's NTAMS.
  // Precondition: obj is a valid heap object.
  inline void deal_with_reference(oop obj);

  // It scans an object and visits its children.
  inline void scan_object(oop obj);

  // It pushes an object on the local queue.
  inline void push(oop obj);

  // These two move entries to/from the global stack.
  void move_entries_to_global_stack();
  void get_entries_from_global_stack();

  // It pops and scans objects from the local queue. If partially is
  // true, then it stops when the queue size drops to a given limit. If
  // partially is false, then it stops when the queue is empty.
  void drain_local_queue(bool partially);
  // It moves entries from the global stack to the local queue and
  // drains the local queue. If partially is true, then it stops when
  // both the global stack and the local queue reach a given size. If
  // partially is false, it tries to empty them totally.
  void drain_global_stack(bool partially);
  // It keeps picking SATB buffers and processing them until no SATB
  // buffers are available.
  void drain_satb_buffers();

  // moves the local finger to a new location
  inline void move_finger_to(HeapWord* new_finger) {
    assert(new_finger >= _finger && new_finger < _region_limit, "invariant");
    _finger = new_finger;
  }

  G1CMTask(uint worker_id,
           G1ConcurrentMark* cm,
           G1CMTaskQueue* task_queue,
           G1CMTaskQueueSet* task_queues);

  // it prints statistics associated with this task
  void print_stats();
};

// Class that's used to print out per-region liveness
// information. It's currently used at the end of marking and also
// after we sort the old regions at the end of the cleanup operation.
class G1PrintRegionLivenessInfoClosure: public HeapRegionClosure {
 private:
  // Accumulators for these values.
  size_t _total_used_bytes;
  size_t _total_capacity_bytes;
  size_t _total_prev_live_bytes;
  size_t _total_next_live_bytes;

  // Accumulator for the remembered set size
  size_t _total_remset_bytes;

  // Accumulator for strong code roots memory size
  size_t _total_strong_code_roots_bytes;

  static double perc(size_t val, size_t total) {
    if (total == 0) {
      return 0.0;
    } else {
      return 100.0 * ((double) val / (double) total);
    }
  }

  static double bytes_to_mb(size_t val) {
    return (double) val / (double) M;
  }

 public:
  // The header and footer are printed in the constructor and
  // destructor respectively.
  G1PrintRegionLivenessInfoClosure(const char* phase_name);
  virtual bool doHeapRegion(HeapRegion* r);
  ~G1PrintRegionLivenessInfoClosure();
};

#endif // SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP