/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP
#define SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP

#include "classfile/javaClasses.hpp"
#include "gc/g1/g1RegionToSpaceMapper.hpp"
#include "gc/g1/heapRegionSet.hpp"
#include "gc/shared/taskqueue.hpp"

class G1CollectedHeap;
class G1CMBitMap;
class G1CMTask;
class G1ConcurrentMark;
class ConcurrentGCTimer;
class G1OldTracer;
typedef GenericTaskQueue<oop, mtGC> G1CMTaskQueue;
typedef GenericTaskQueueSet<G1CMTaskQueue, mtGC> G1CMTaskQueueSet;

// Closure used by CM during concurrent reference discovery
// and reference processing (during remarking) to determine
// if a particular object is alive. It is primarily used
// to determine if referents of discovered reference objects
// are alive. An instance is also embedded into the
// reference processor as the _is_alive_non_header field
class G1CMIsAliveClosure: public BoolObjectClosure {
  G1CollectedHeap* _g1;
 public:
  G1CMIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) { }

  bool do_object_b(oop obj);
};

// A generic CM bit map. This is essentially a wrapper around the BitMap
// class, with one bit per (1<<_shifter) HeapWords.

class G1CMBitMapRO VALUE_OBJ_CLASS_SPEC {
 protected:
  HeapWord* _bmStartWord; // base address of range covered by map
  size_t    _bmWordSize;  // map size (in #HeapWords covered)
  const int _shifter;     // map to char or bit
  BitMap    _bm;          // the bit map itself

 public:
  // constructor
  G1CMBitMapRO(int shifter);

  // inquiries
  HeapWord* startWord() const { return _bmStartWord; }
  // the following is one past the last word in space
  HeapWord* endWord()   const { return _bmStartWord + _bmWordSize; }

  // read marks

  bool isMarked(HeapWord* addr) const {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    return _bm.at(heapWordToOffset(addr));
  }

  // iteration
  inline bool iterate(BitMapClosure* cl, MemRegion mr);

  // Return the address corresponding to the next marked bit at or after
  // "addr", and before "limit", if "limit" is non-NULL. If there is no
  // such bit, returns "limit" if that is non-NULL, or else "endWord()".
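  //
  // Illustrative sketch (not part of the original header): walking the
  // marked objects in [bottom, top). The advance by obj->size() and the
  // local variable names are assumptions for illustration only.
  //
  //   HeapWord* cur = bitmap->getNextMarkedWordAddress(bottom, top);
  //   while (cur < top) {
  //     oop obj = oop(cur);
  //     // ... process the marked object ...
  //     cur = bitmap->getNextMarkedWordAddress(cur + obj->size(), top);
  //   }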
  HeapWord* getNextMarkedWordAddress(const HeapWord* addr,
                                     const HeapWord* limit = NULL) const;

  // conversion utilities
  HeapWord* offsetToHeapWord(size_t offset) const {
    return _bmStartWord + (offset << _shifter);
  }
  size_t heapWordToOffset(const HeapWord* addr) const {
    return pointer_delta(addr, _bmStartWord) >> _shifter;
  }

  // The argument addr should be the start address of a valid object
  inline HeapWord* nextObject(HeapWord* addr);

  void print_on_error(outputStream* st, const char* prefix) const;

  // debugging
  NOT_PRODUCT(bool covers(MemRegion rs) const;)
};

class G1CMBitMapMappingChangedListener : public G1MappingChangedListener {
 private:
  G1CMBitMap* _bm;
 public:
  G1CMBitMapMappingChangedListener() : _bm(NULL) {}

  void set_bitmap(G1CMBitMap* bm) { _bm = bm; }

  virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
};

class G1CMBitMap : public G1CMBitMapRO {
 private:
  G1CMBitMapMappingChangedListener _listener;

 public:
  static size_t compute_size(size_t heap_size);
  // Returns the number of bytes on the heap between two marks in the bitmap.
  static size_t mark_distance();
  // Returns how many bytes (or bits) of the heap a single byte (or bit) of the
  // mark bitmap corresponds to. This is the same as the mark distance above.
  static size_t heap_map_factor() {
    return mark_distance();
  }

  G1CMBitMap() : G1CMBitMapRO(LogMinObjAlignment), _listener() { _listener.set_bitmap(this); }

  // Initializes the underlying BitMap to cover the given area.
  void initialize(MemRegion heap, G1RegionToSpaceMapper* storage);

  // Write marks.
  inline void mark(HeapWord* addr);
  inline void clear(HeapWord* addr);
  inline bool parMark(HeapWord* addr);

  void clear_range(MemRegion mr);
};

// Represents a marking stack used by ConcurrentMarking in the G1 collector.
class G1CMMarkStack VALUE_OBJ_CLASS_SPEC {
  VirtualSpace _virtual_space; // Underlying backing store for actual stack
  G1ConcurrentMark* _cm;
  oop* _base;                  // bottom of stack
  jint _index;                 // one more than last occupied index
  jint _capacity;              // max #elements
  jint _saved_index;           // value of _index saved at start of GC

  bool _overflow;
  bool _should_expand;

 public:
  G1CMMarkStack(G1ConcurrentMark* cm);
  ~G1CMMarkStack();

  bool allocate(size_t capacity);

  // Pushes the first "n" elements of "ptr_arr" on the stack.
  // Locking impl: concurrency is allowed only with
  // "par_push_arr" and/or "par_pop_arr" operations, which use the same
  // locking strategy.
  void par_push_arr(oop* ptr_arr, int n);

  // If returns false, the array was empty. Otherwise, removes up to "max"
  // elements from the stack, and transfers them to "ptr_arr" in an
  // unspecified order. The actual number transferred is given in "n" ("n
  // == 0" is deliberately redundant with the return value.) Locking impl:
  // concurrency is allowed only with "par_push_arr" and/or "par_pop_arr"
  // operations, which use the same locking strategy.
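  //
  // Illustrative sketch (not part of the original header): draining a batch
  // from the global stack into a caller-provided buffer. The buffer size and
  // variable names are assumptions for illustration only.
  //
  //   oop buffer[64];
  //   int n;
  //   if (stack->par_pop_arr(buffer, 64, &n)) {
  //     // process buffer[0 .. n-1]
  //   }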
  bool par_pop_arr(oop* ptr_arr, int max, int* n);

  bool isEmpty()  { return _index == 0; }
  int  maxElems() { return _capacity; }

  bool overflow() { return _overflow; }
  void clear_overflow() { _overflow = false; }

  bool should_expand() const { return _should_expand; }
  void set_should_expand();

  // Expand the stack, typically in response to an overflow condition
  void expand();

  int size() { return _index; }

  void setEmpty() { _index = 0; clear_overflow(); }

  // Record the current index.
  void note_start_of_gc();

  // Make sure that we have not added any entries to the stack during GC.
  void note_end_of_gc();

  // Apply fn to each oop in the mark stack, up to the bound recorded
  // via one of the above "note" functions. The mark stack must not
  // be modified while iterating.
  template<typename Fn> void iterate(Fn fn);
};

class YoungList;

// Root Regions are regions that are not empty at the beginning of a
// marking cycle and which we might collect during an evacuation pause
// while the cycle is active. Given that, during evacuation pauses, we
// do not copy objects that are explicitly marked, what we have to do
// for the root regions is to scan them and mark all objects reachable
// from them. According to the SATB assumptions, we only need to visit
// each object once during marking. So, as long as we finish this scan
// before the next evacuation pause, we can copy the objects from the
// root regions without having to mark them or do anything else to them.
//
// Currently, we only support root region scanning once (at the start
// of the marking cycle) and the root regions are all the survivor
// regions populated during the initial-mark pause.
class G1CMRootRegions VALUE_OBJ_CLASS_SPEC {
 private:
  YoungList*           _young_list;
  G1ConcurrentMark*    _cm;

  volatile bool        _scan_in_progress;
  volatile bool        _should_abort;
  HeapRegion* volatile _next_survivor;

  void notify_scan_done();

 public:
  G1CMRootRegions();
  // We actually do most of the initialization in this method.
  void init(G1CollectedHeap* g1h, G1ConcurrentMark* cm);

  // Reset the claiming / scanning of the root regions.
  void prepare_for_scan();

  // Forces claim_next() to return NULL so that the iteration aborts early.
  void abort() { _should_abort = true; }

  // Return true if the CM threads are actively scanning root regions,
  // false otherwise.
  bool scan_in_progress() { return _scan_in_progress; }

  // Claim the next root region to scan atomically, or return NULL if
  // all have been claimed.
  HeapRegion* claim_next();

  void cancel_scan();

  // Flag that we're done with root region scanning and notify anyone
  // who's waiting on it. If aborted is false, assume that all regions
  // have been claimed.
  void scan_finished();
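
  // Illustrative sketch (not part of the original header): a marking worker
  // typically claims and scans root regions in a loop until claim_next()
  // returns NULL. The per-region scan helper named below is hypothetical.
  //
  //   HeapRegion* hr = root_regions->claim_next();
  //   while (hr != NULL) {
  //     scan_root_region(hr);          // hypothetical per-region scan
  //     hr = root_regions->claim_next();
  //   }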

  // If CM threads are still scanning root regions, wait until they
  // are done. Return true if we had to wait, false otherwise.
  bool wait_until_scan_finished();
};

class ConcurrentMarkThread;

class G1ConcurrentMark: public CHeapObj<mtGC> {
  friend class ConcurrentMarkThread;
  friend class G1ParNoteEndTask;
  friend class CalcLiveObjectsClosure;
  friend class G1CMRefProcTaskProxy;
  friend class G1CMRefProcTaskExecutor;
  friend class G1CMKeepAliveAndDrainClosure;
  friend class G1CMDrainMarkingStackClosure;
  friend class G1CMBitMapClosure;
  friend class G1CMConcurrentMarkingTask;
  friend class G1CMMarkStack;
  friend class G1CMRemarkTask;
  friend class G1CMTask;

 protected:
  ConcurrentMarkThread* _cmThread; // The thread doing the work
  G1CollectedHeap*      _g1h;      // The heap

  uint _parallel_marking_threads;     // The number of marking threads we're using
  uint _max_parallel_marking_threads; // Max number of marking threads we'll ever use

  double _sleep_factor;          // How much we have to sleep, with
                                 // respect to the work we just did, to
                                 // meet the marking overhead goal
  double _marking_task_overhead; // Marking target overhead for a single task

  FreeRegionList _cleanup_list;

  // Concurrent marking support structures
  G1CMBitMap    _markBitMap1;
  G1CMBitMap    _markBitMap2;
  G1CMBitMapRO* _prevMarkBitMap; // Completed mark bitmap
  G1CMBitMap*   _nextMarkBitMap; // Under-construction mark bitmap

  BitMap _region_bm;
  BitMap _card_bm;

  // Heap bounds
  HeapWord* _heap_start;
  HeapWord* _heap_end;

  // Root region tracking and claiming
  G1CMRootRegions _root_regions;

  // For gray objects
  G1CMMarkStack      _markStack; // Grey objects behind global finger
  HeapWord* volatile _finger;    // The global finger, region aligned,
                                 // always points to the end of the
                                 // last claimed region

  // Marking tasks
  uint                   _max_worker_id; // Maximum worker id
  uint                   _active_tasks;  // Number of tasks currently active
  G1CMTask**             _tasks;         // Task array (max_worker_id length)
  G1CMTaskQueueSet*      _task_queues;   // Task queue set
  ParallelTaskTerminator _terminator;    // For termination

  // Two sync barriers that are used to synchronize tasks when an
  // overflow occurs. The algorithm is the following. All tasks enter
  // the first one to ensure that they have all stopped manipulating
  // the global data structures. After they exit it, they re-initialize
  // their data structures and task 0 re-initializes the global data
  // structures. Then, they enter the second sync barrier. This
  // ensures that no task starts doing work before all data
  // structures (local and global) have been re-initialized. When they
  // exit it, they are free to start working again.
  WorkGangBarrierSync _first_overflow_barrier_sync;
  WorkGangBarrierSync _second_overflow_barrier_sync;

  // This is set by any task, when an overflow on the global data
  // structures is detected
  volatile bool _has_overflown;
  // True: marking is concurrent, false: we're in remark
  volatile bool _concurrent;
  // Set at the end of a Full GC so that marking aborts
  volatile bool _has_aborted;

  // Used when remark aborts due to an overflow to indicate that
  // another concurrent marking phase should start
  volatile bool _restart_for_overflow;
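
  // Illustrative sketch (not part of the original header) of how a task is
  // expected to react to an overflow, based on the barrier description
  // above; the worker_id variable is an assumption for illustration only.
  //
  //   if (has_overflown()) {
  //     enter_first_sync_barrier(worker_id);  // everyone stops touching global state
  //     // re-initialize local structures; task 0 also resets the global ones
  //     enter_second_sync_barrier(worker_id); // then everyone resumes work
  //   }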

  // This is true from the very start of concurrent marking until the
  // point when all the tasks complete their work. It is really used
  // to determine the points between the end of concurrent marking and
  // time of remark.
  volatile bool _concurrent_marking_in_progress;

  ConcurrentGCTimer* _gc_timer_cm;

  G1OldTracer* _gc_tracer_cm;

  // All of these times are in ms
  NumberSeq _init_times;
  NumberSeq _remark_times;
  NumberSeq _remark_mark_times;
  NumberSeq _remark_weak_ref_times;
  NumberSeq _cleanup_times;
  double    _total_counting_time;
  double    _total_rs_scrub_time;

  double* _accum_task_vtime; // Accumulated task vtime

  WorkGang* _parallel_workers;

  void weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes);
  void weakRefsWork(bool clear_all_soft_refs);

  void swapMarkBitMaps();

  // It resets the global marking data structures, as well as the
  // task local ones; should be called during initial mark.
  void reset();

  // Resets all the marking data structures. Called when we have to restart
  // marking or when marking completes (via set_non_marking_state below).
  void reset_marking_state(bool clear_overflow = true);

  // We do this after we're done with marking so that the marking data
  // structures are initialized to a sensible and predictable state.
  void set_non_marking_state();

  // Called to indicate how many threads are currently active.
  void set_concurrency(uint active_tasks);

  // It should be called to indicate which phase we're in (concurrent
  // mark or remark) and how many threads are currently active.
  void set_concurrency_and_phase(uint active_tasks, bool concurrent);

  // Prints all gathered CM-related statistics
  void print_stats();

  bool cleanup_list_is_empty() {
    return _cleanup_list.is_empty();
  }

  // Accessor methods
  uint parallel_marking_threads() const     { return _parallel_marking_threads; }
  uint max_parallel_marking_threads() const { return _max_parallel_marking_threads; }
  double sleep_factor()                     { return _sleep_factor; }
  double marking_task_overhead()            { return _marking_task_overhead; }

  HeapWord*               finger()       { return _finger; }
  bool                    concurrent()   { return _concurrent; }
  uint                    active_tasks() { return _active_tasks; }
  ParallelTaskTerminator* terminator()   { return &_terminator; }

  // It claims the next available region to be scanned by a marking
  // task/thread. It might return NULL if the next region is empty or
  // we have run out of regions. In the latter case, out_of_regions()
  // determines whether we've really run out of regions or the task
  // should call claim_region() again. This might seem a bit
  // awkward. Originally, the code was written so that claim_region()
  // either successfully returned with a non-empty region or there
  // were no more regions to be claimed. The problem with this was
  // that, in certain circumstances, it iterated over large chunks of
  // the heap finding only empty regions and, while it was working, it
  // was preventing the calling task from calling its regular clock
  // method. So, this way, each task will spend very little time in
  // claim_region() and is allowed to call the regular clock method
  // frequently.
  HeapRegion* claim_region(uint worker_id);
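
  // Illustrative sketch (not part of the original header): because
  // claim_region() may return NULL for an empty region while regions
  // remain, a task typically retries, interleaving its regular clock
  // calls, until out_of_regions() is true:
  //
  //   HeapRegion* hr = claim_region(worker_id);
  //   while (hr == NULL && !out_of_regions()) {
  //     // give the task's clock a chance to fire, then try again
  //     hr = claim_region(worker_id);
  //   }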

  // It determines whether we've run out of regions to scan. Note that
  // the finger can point past the heap end in case the heap was expanded
  // to satisfy an allocation without doing a GC. This is fine, because all
  // objects in those regions will be considered live anyway because of
  // SATB guarantees (i.e. their TAMS will be equal to bottom).
  bool out_of_regions() { return _finger >= _heap_end; }

  // Returns the task with the given id
  G1CMTask* task(int id) {
    assert(0 <= id && id < (int) _active_tasks,
           "task id not within active bounds");
    return _tasks[id];
  }

  // Returns the task queue with the given id
  G1CMTaskQueue* task_queue(int id) {
    assert(0 <= id && id < (int) _active_tasks,
           "task queue id not within active bounds");
    return (G1CMTaskQueue*) _task_queues->queue(id);
  }

  // Returns the task queue set
  G1CMTaskQueueSet* task_queues() { return _task_queues; }

  // Access / manipulation of the overflow flag which is set to
  // indicate that the global stack has overflown
  bool has_overflown()        { return _has_overflown; }
  void set_has_overflown()    { _has_overflown = true; }
  void clear_has_overflown()  { _has_overflown = false; }
  bool restart_for_overflow() { return _restart_for_overflow; }

  // Methods to enter the two overflow sync barriers
  void enter_first_sync_barrier(uint worker_id);
  void enter_second_sync_barrier(uint worker_id);

  // Live Data Counting data structures...
  // These data structures are initialized at the start of
  // marking. They are written to while marking is active.
  // They are aggregated during remark; the aggregated values
  // are then used to populate the _region_bm, _card_bm, and
  // the total live bytes, which are then subsequently updated
  // during cleanup.

  // An array of bitmaps (one bit map per task). Each bitmap
  // is used to record the cards spanned by the live objects
  // marked by that task/worker.
  BitMap*  _count_card_bitmaps;

  // Used to record the number of marked live bytes
  // (for each region, by worker thread).
  size_t** _count_marked_bytes;

  // Card index of the bottom of the G1 heap. Used for biasing indices into
  // the card bitmaps.
  intptr_t _heap_bottom_card_num;

  // Set to true when initialization is complete
  bool _completed_initialization;

  // If end_timer is true, the GC timer is ended after ending the concurrent phase.
  void register_concurrent_phase_end_common(bool end_timer);

  // Clear the given bitmap in parallel using the given WorkGang. If may_yield is
  // true, periodically insert checks to see if this method should exit prematurely.
  void clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield);
 public:
  // Manipulation of the global mark stack.
  // The push and pop operations are used by tasks for transfers
  // between task-local queues and the global mark stack, and use
  // locking for concurrency safety.
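  //
  // Illustrative sketch (not part of the original header): a task spilling a
  // batch of entries from its local queue; the buffer name, its size and the
  // count k are assumptions for illustration only.
  //
  //   oop buffer[16];
  //   // ... fill buffer with k entries popped from the local task queue ...
  //   if (!mark_stack_push(buffer, k)) {
  //     // the global stack overflowed; marking will be restarted
  //   }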
  bool mark_stack_push(oop* arr, int n) {
    _markStack.par_push_arr(arr, n);
    if (_markStack.overflow()) {
      set_has_overflown();
      return false;
    }
    return true;
  }
  void mark_stack_pop(oop* arr, int max, int* n) {
    _markStack.par_pop_arr(arr, max, n);
  }
  size_t mark_stack_size()                { return _markStack.size(); }
  size_t partial_mark_stack_size_target() { return _markStack.maxElems()/3; }
  bool mark_stack_overflow()              { return _markStack.overflow(); }
  bool mark_stack_empty()                 { return _markStack.isEmpty(); }

  G1CMRootRegions* root_regions() { return &_root_regions; }

  bool concurrent_marking_in_progress() {
    return _concurrent_marking_in_progress;
  }
  void set_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = true;
  }
  void clear_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = false;
  }

  void concurrent_cycle_start();
  void concurrent_cycle_end();

  void update_accum_task_vtime(int i, double vtime) {
    _accum_task_vtime[i] += vtime;
  }

  double all_task_accum_vtime() {
    double ret = 0.0;
    for (uint i = 0; i < _max_worker_id; ++i)
      ret += _accum_task_vtime[i];
    return ret;
  }

  // Attempts to steal an object from the task queues of other tasks
  bool try_stealing(uint worker_id, int* hash_seed, oop& obj);

  G1ConcurrentMark(G1CollectedHeap* g1h,
                   G1RegionToSpaceMapper* prev_bitmap_storage,
                   G1RegionToSpaceMapper* next_bitmap_storage);
  ~G1ConcurrentMark();

  ConcurrentMarkThread* cmThread() { return _cmThread; }

  G1CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; }
  G1CMBitMap*   nextMarkBitMap() const { return _nextMarkBitMap; }

  // Returns the number of GC threads to be used in a concurrent
  // phase based on the number of GC threads being used in a STW
  // phase.
  uint scale_parallel_threads(uint n_par_threads);

  // Calculates the number of GC threads to be used in a concurrent phase.
  uint calc_parallel_marking_threads();

  // The following three are interactions between CM and
  // G1CollectedHeap.

  // This notifies CM that a root during initial-mark needs to be
  // grayed. It is MT-safe. word_size is the size of the object in
  // words. It is passed explicitly as sometimes we cannot calculate
  // it from the given object because it might be in an inconsistent
  // state (e.g., in to-space and being copied). So the caller is
  // responsible for dealing with this issue (e.g., get the size from
  // the from-space image when the to-space image might be
  // inconsistent) and always passing the size. hr is the region that
  // contains the object and it's passed optionally from callers who
  // might already have it (no point in recalculating it).
  inline void grayRoot(oop obj,
                       size_t word_size,
                       uint worker_id,
                       HeapRegion* hr = NULL);

  // Prepare internal data structures for the next mark cycle. This includes clearing
  // the next mark bitmap and some internal data structures. This method is intended
  // to be called concurrently to the mutator. It will yield to safepoint requests.
  void cleanup_for_next_mark();

  // Clear the previous marking bitmap during safepoint.
  void clear_prev_bitmap(WorkGang* workers);

  // Return whether the next mark bitmap has no marks set. To be used for assertions
  // only. Will not yield to pause requests.
  bool nextMarkBitmapIsClear();

  // These two do the work that needs to be done before and after the
  // initial root checkpoint. Since this checkpoint can be done at two
  // different points (i.e. an explicit pause or piggy-backed on a
  // young collection), then it's nice to be able to easily share the
  // pre/post code. It might be the case that we can put everything in
  // the post method. TP
  void checkpointRootsInitialPre();
  void checkpointRootsInitialPost();

  // Scan all the root regions and mark everything reachable from
  // them.
  void scan_root_regions();

  // Scan a single root region and mark everything reachable from it.
  void scanRootRegion(HeapRegion* hr, uint worker_id);

  // Do concurrent phase of marking, to a tentative transitive closure.
  void mark_from_roots();

  void checkpointRootsFinal(bool clear_all_soft_refs);
  void checkpointRootsFinalWork();
  void cleanup();
  void complete_cleanup();

  // Mark in the previous bitmap. NB: this is usually read-only, so use
  // this carefully!
  inline void markPrev(oop p);

  // Clears marks for all objects in the given range in the previous
  // bitmap. NB: the previous bitmap is usually read-only, so use this
  // carefully!
  void clearRangePrevBitmap(MemRegion mr);

  // Notify data structures that a GC has started.
  void note_start_of_gc() {
    _markStack.note_start_of_gc();
  }

  // Notify data structures that a GC is finished.
  void note_end_of_gc() {
    _markStack.note_end_of_gc();
  }

  // Verify that there are no CSet oops on the stacks (taskqueues /
  // global mark stack) and fingers (global / per-task).
  // If marking is not in progress, it's a no-op.
  void verify_no_cset_oops() PRODUCT_RETURN;

  inline bool isPrevMarked(oop p) const;

  inline bool do_yield_check(uint worker_i = 0);

  // Called to abort the marking cycle after a Full GC takes place.
  void abort();

  bool has_aborted() { return _has_aborted; }

  void print_summary_info();

  void print_worker_threads_on(outputStream* st) const;

  void print_on_error(outputStream* st) const;

  // Liveness counting

  // Utility routine to set an exclusive range of cards on the given
  // card liveness bitmap
  inline void set_card_bitmap_range(BitMap* card_bm,
                                    BitMap::idx_t start_idx,
                                    BitMap::idx_t end_idx,
                                    bool is_par);

  // Returns the card number of the bottom of the G1 heap.
  // Used in biasing indices into accounting card bitmaps.
  intptr_t heap_bottom_card_num() const {
    return _heap_bottom_card_num;
  }

  // Returns the card bitmap for a given task or worker id.
  BitMap* count_card_bitmap_for(uint worker_id) {
    assert(worker_id < _max_worker_id, "oob");
    assert(_count_card_bitmaps != NULL, "uninitialized");
    BitMap* task_card_bm = &_count_card_bitmaps[worker_id];
    assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
    return task_card_bm;
  }
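
  // Illustrative note (not part of the original header): the accounting card
  // bitmaps are biased by the card index of the heap's bottom, so the bit for
  // an address is presumably computed along the lines of
  //   card_bitmap_index_for(addr) == card_num(addr) - heap_bottom_card_num()
  // where card_num(addr) is the address's global card index.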

  // Returns the array containing the marked bytes for each region,
  // for the given worker or task id.
  size_t* count_marked_bytes_array_for(uint worker_id) {
    assert(worker_id < _max_worker_id, "oob");
    assert(_count_marked_bytes != NULL, "uninitialized");
    size_t* marked_bytes_array = _count_marked_bytes[worker_id];
    assert(marked_bytes_array != NULL, "uninitialized");
    return marked_bytes_array;
  }

  // Returns the index in the liveness accounting card table bitmap
  // for the given address
  inline BitMap::idx_t card_bitmap_index_for(HeapWord* addr);

  // Counts the size of the given memory region in the given
  // marked_bytes array slot for the given HeapRegion.
  // Sets the bits in the given card bitmap that are associated with the
  // cards that are spanned by the memory region.
  inline void count_region(MemRegion mr,
                           HeapRegion* hr,
                           size_t* marked_bytes_array,
                           BitMap* task_card_bm);

  // Counts the given object in the given task/worker counting
  // data structures.
  inline void count_object(oop obj,
                           HeapRegion* hr,
                           size_t* marked_bytes_array,
                           BitMap* task_card_bm,
                           size_t word_size);

  // Attempts to mark the given object and, if successful, counts
  // the object in the given task/worker counting structures.
  inline bool par_mark_and_count(oop obj,
                                 HeapRegion* hr,
                                 size_t* marked_bytes_array,
                                 BitMap* task_card_bm);

  // Attempts to mark the given object and, if successful, counts
  // the object in the task/worker counting structures for the
  // given worker id.
  inline bool par_mark_and_count(oop obj,
                                 size_t word_size,
                                 HeapRegion* hr,
                                 uint worker_id);

  // Returns true if initialization was successfully completed.
  bool completed_initialization() const {
    return _completed_initialization;
  }

  ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; }
  G1OldTracer* gc_tracer_cm() const { return _gc_tracer_cm; }

 protected:
  // Clear all the per-task bitmaps and arrays used to store the
  // counting data.
  void clear_all_count_data();

  // Aggregates the counting data for each worker/task
  // that was constructed while marking. Also sets
  // the amount of marked bytes for each region and
  // the top at concurrent mark count.
  void aggregate_count_data();

  // Verification routine
  void verify_count_data();
};

// A class representing a marking task.
class G1CMTask : public TerminatorTerminator {
 private:
  enum PrivateConstants {
    // the regular clock call is called once the number of scanned
    // words reaches this limit
    words_scanned_period       = 12*1024,
    // the regular clock call is called once the number of visited
    // references reaches this limit
    refs_reached_period        = 384,
    // initial value for the hash seed, used in the work stealing code
    init_hash_seed             = 17,
    // how many entries will be transferred between global stack and
    // local queues
    global_stack_transfer_size = 16
  };

  uint              _worker_id;
  G1CollectedHeap*  _g1h;
  G1ConcurrentMark* _cm;
  G1CMBitMap*       _nextMarkBitMap;
  // the task queue of this task
  G1CMTaskQueue*    _task_queue;
 private:
  // the task queue set---needed for stealing
  G1CMTaskQueueSet* _task_queues;
  // indicates whether the task has been claimed---this is only for
  // debugging purposes
  bool              _claimed;

  // number of calls to this task
  int               _calls;

  // when the virtual timer reaches this time, the marking step should
  // exit
  double            _time_target_ms;
  // the start time of the current marking step
  double            _start_time_ms;

  // the oop closure used for iterations over oops
  G1CMOopClosure*   _cm_oop_closure;

  // the region this task is scanning, NULL if we're not scanning any
  HeapRegion*       _curr_region;
  // the local finger of this task, NULL if we're not scanning a region
  HeapWord*         _finger;
  // limit of the region this task is scanning, NULL if we're not scanning one
  HeapWord*         _region_limit;

  // the number of words this task has scanned
  size_t            _words_scanned;
  // When _words_scanned reaches this limit, the regular clock is
  // called. Notice that this might be decreased under certain
  // circumstances (i.e. when we believe that we did an expensive
  // operation).
  size_t            _words_scanned_limit;
  // the initial value of _words_scanned_limit (i.e. what it was
  // before it was decreased).
  size_t            _real_words_scanned_limit;

  // the number of references this task has visited
  size_t            _refs_reached;
  // When _refs_reached reaches this limit, the regular clock is
  // called. Notice that this might be decreased under certain
  // circumstances (i.e. when we believe that we did an expensive
  // operation).
  size_t            _refs_reached_limit;
  // the initial value of _refs_reached_limit (i.e. what it was before
  // it was decreased).
  size_t            _real_refs_reached_limit;

  // used by the work stealing stuff
  int               _hash_seed;
  // if this is true, then the task has aborted for some reason
  bool              _has_aborted;
  // set when the task aborts because it has met its time quota
  bool              _has_timed_out;
  // true when we're draining SATB buffers; this avoids the task
  // aborting due to SATB buffers being available (as we're already
  // dealing with them)
  bool              _draining_satb_buffers;

  // number sequence of past step times
  NumberSeq         _step_times_ms;
  // elapsed time of this task
  double            _elapsed_time_ms;
  // termination time of this task
  double            _termination_time_ms;
  // when this task got into the termination protocol
  double            _termination_start_time_ms;

  // true when the task is during a concurrent phase, false when it is
  // in the remark phase (so, in the latter case, we do not have to
  // check all the things that we have to check during the concurrent
  // phase, i.e. SATB buffer availability...)
  bool              _concurrent;

  TruncatedSeq      _marking_step_diffs_ms;

  // Counting data structures. Embedding the task's marked_bytes_array
  // and card bitmap into the actual task saves having to go through
  // the ConcurrentMark object.
  size_t*           _marked_bytes_array;
  BitMap*           _card_bm;

  // it updates the local fields after this task has claimed
  // a new region to scan
  void setup_for_region(HeapRegion* hr);
  // it brings the limit of the region up to date
  void update_region_limit();

  // called when either the words scanned or the refs visited limit
  // has been reached
  void reached_limit();
  // recalculates the words scanned and refs visited limits
  void recalculate_limits();
  // decreases the words scanned and refs visited limits when we reach
  // an expensive operation
  void decrease_limits();
  // it checks whether the words scanned or refs visited reached their
  // respective limit and calls reached_limit() if they have
  void check_limits() {
    if (_words_scanned >= _words_scanned_limit ||
        _refs_reached >= _refs_reached_limit) {
      reached_limit();
    }
  }
  // this is supposed to be called regularly during a marking step as
  // it checks a number of conditions that might cause the marking step
  // to abort
  void regular_clock_call();
  bool concurrent() { return _concurrent; }

  // Test whether obj might have already been passed over by the
  // mark bitmap scan, and so needs to be pushed onto the mark stack.
  bool is_below_finger(oop obj, HeapWord* global_finger) const;

  template<bool scan> void process_grey_object(oop obj);

 public:
  // It resets the task; it should be called right at the beginning of
  // a marking phase.
  void reset(G1CMBitMap* _nextMarkBitMap);
  // it clears all the fields that correspond to a claimed region.
  void clear_region_fields();

  void set_concurrent(bool concurrent) { _concurrent = concurrent; }

  // The main method of this class which performs a marking step
  // trying not to exceed the given duration. However, it might exit
  // prematurely, according to some conditions (i.e. SATB buffers are
  // available for processing).
  void do_marking_step(double target_ms,
                       bool do_termination,
                       bool is_serial);

  // These two calls start and stop the timer
  void record_start_time() {
    _elapsed_time_ms = os::elapsedTime() * 1000.0;
  }
  void record_end_time() {
    _elapsed_time_ms = os::elapsedTime() * 1000.0 - _elapsed_time_ms;
  }

  // returns the worker ID associated with this task.
  uint worker_id() { return _worker_id; }

  // From TerminatorTerminator. It determines whether this task should
  // exit the termination protocol after it's entered it.
  virtual bool should_exit_termination();

  // Resets the local region fields after a task has finished scanning a
  // region; or when they have become stale as a result of the region
  // being evacuated.
  void giveup_current_region();

  HeapWord* finger() { return _finger; }

  bool has_aborted()       { return _has_aborted; }
  void set_has_aborted()   { _has_aborted = true; }
  void clear_has_aborted() { _has_aborted = false; }
  bool has_timed_out()     { return _has_timed_out; }
  bool claimed()           { return _claimed; }

  void set_cm_oop_closure(G1CMOopClosure* cm_oop_closure);

  // Increment the number of references this task has visited.
  void increment_refs_reached() { ++_refs_reached; }

  // Grey the object by marking it. If not already marked, push it on
  // the local queue if below the finger.
  // Precondition: obj is in region.
  // Precondition: obj is below region's NTAMS.
  inline void make_reference_grey(oop obj, HeapRegion* region);

  // Grey the object (by calling make_reference_grey) if required,
  // e.g. obj is below its containing region's NTAMS.
  // Precondition: obj is a valid heap object.
  inline void deal_with_reference(oop obj);

  // It scans an object and visits its children.
  inline void scan_object(oop obj);

  // It pushes an object on the local queue.
  inline void push(oop obj);

  // These two move entries to/from the global stack.
  void move_entries_to_global_stack();
  void get_entries_from_global_stack();

  // It pops and scans objects from the local queue. If partially is
  // true, then it stops when the queue size drops to a given limit. If
  // partially is false, then it stops when the queue is empty.
  void drain_local_queue(bool partially);
  // It moves entries from the global stack to the local queue and
  // drains the local queue. If partially is true, then it stops when
  // both the global stack and the local queue reach a given size. If
  // partially is false, it tries to empty them totally.
  void drain_global_stack(bool partially);
  // It keeps picking SATB buffers and processing them until no SATB
  // buffers are available.
  void drain_satb_buffers();

  // moves the local finger to a new location
  inline void move_finger_to(HeapWord* new_finger) {
    assert(new_finger >= _finger && new_finger < _region_limit, "invariant");
    _finger = new_finger;
  }

  G1CMTask(uint worker_id,
           G1ConcurrentMark *cm,
           size_t* marked_bytes,
           BitMap* card_bm,
           G1CMTaskQueue* task_queue,
           G1CMTaskQueueSet* task_queues);

  // it prints statistics associated with this task
  void print_stats();
};

// Class that's used to print out per-region liveness
// information. It's currently used at the end of marking and also
// after we sort the old regions at the end of the cleanup operation.
class G1PrintRegionLivenessInfoClosure: public HeapRegionClosure {
 private:
  // Accumulators for these values.
  size_t _total_used_bytes;
  size_t _total_capacity_bytes;
  size_t _total_prev_live_bytes;
  size_t _total_next_live_bytes;

  // Accumulator for the remembered set size
  size_t _total_remset_bytes;

  // Accumulator for strong code roots memory size
  size_t _total_strong_code_roots_bytes;

  static double perc(size_t val, size_t total) {
    if (total == 0) {
      return 0.0;
    } else {
      return 100.0 * ((double) val / (double) total);
    }
  }

  static double bytes_to_mb(size_t val) {
    return (double) val / (double) M;
  }

 public:
  // The header and footer are printed in the constructor and
  // destructor respectively.
  G1PrintRegionLivenessInfoClosure(const char* phase_name);
  virtual bool doHeapRegion(HeapRegion* r);
  ~G1PrintRegionLivenessInfoClosure();
};

#endif // SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP