/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_CONCURRENTMARK_HPP
#define SHARE_VM_GC_G1_CONCURRENTMARK_HPP

#include "classfile/javaClasses.hpp"
#include "gc/g1/g1RegionToSpaceMapper.hpp"
#include "gc/g1/heapRegionSet.hpp"
#include "gc/shared/taskqueue.hpp"

class G1CollectedHeap;
class CMBitMap;
class CMTask;
class ConcurrentMark;
typedef GenericTaskQueue<oop, mtGC>            CMTaskQueue;
typedef GenericTaskQueueSet<CMTaskQueue, mtGC> CMTaskQueueSet;

// Closure used by CM during concurrent reference discovery
// and reference processing (during remarking) to determine
// if a particular object is alive. It is primarily used
// to determine if referents of discovered reference objects
// are alive. An instance is also embedded into the
// reference processor as the _is_alive_non_header field.
class G1CMIsAliveClosure: public BoolObjectClosure {
  G1CollectedHeap* _g1;
 public:
  G1CMIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) { }

  bool do_object_b(oop obj);
};

// A generic CM bit map. This is essentially a wrapper around the BitMap
// class, with one bit per (1 << _shifter) HeapWords.

class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
 protected:
  HeapWord* _bmStartWord; // base address of range covered by map
  size_t    _bmWordSize;  // map size (in #HeapWords covered)
  const int _shifter;     // map to char or bit
  BitMap    _bm;          // the bit map itself

 public:
  // constructor
  CMBitMapRO(int shifter);

  // inquiries
  HeapWord* startWord() const { return _bmStartWord; }
  // the following is one past the last word in space
  HeapWord* endWord()   const { return _bmStartWord + _bmWordSize; }

  // read marks

  bool isMarked(HeapWord* addr) const {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    return _bm.at(heapWordToOffset(addr));
  }

  // iteration
  inline bool iterate(BitMapClosure* cl, MemRegion mr);

  // Return the address corresponding to the next marked bit at or after
  // "addr", and before "limit", if "limit" is non-NULL. If there is no
  // such bit, returns "limit" if that is non-NULL, or else "endWord()".
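  //
  // Illustrative only (not part of this class): a typical scan over the
  // marked addresses in a range [start, end) would look roughly like
  //
  //   HeapWord* cur = bm->getNextMarkedWordAddress(start, end);
  //   while (cur < end) {
  //     // ... process the object starting at cur ...
  //     cur = bm->getNextMarkedWordAddress(cur + 1, end);
  //   }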
  HeapWord* getNextMarkedWordAddress(const HeapWord* addr,
                                     const HeapWord* limit = NULL) const;

  // conversion utilities
  HeapWord* offsetToHeapWord(size_t offset) const {
    return _bmStartWord + (offset << _shifter);
  }
  size_t heapWordToOffset(const HeapWord* addr) const {
    return pointer_delta(addr, _bmStartWord) >> _shifter;
  }

  // The argument addr should be the start address of a valid object
  HeapWord* nextObject(HeapWord* addr) {
    oop obj = (oop) addr;
    HeapWord* res = addr + obj->size();
    assert(offsetToHeapWord(heapWordToOffset(res)) == res, "sanity");
    return res;
  }

  void print_on_error(outputStream* st, const char* prefix) const;

  // debugging
  NOT_PRODUCT(bool covers(MemRegion rs) const;)
};

class CMBitMapMappingChangedListener : public G1MappingChangedListener {
 private:
  CMBitMap* _bm;
 public:
  CMBitMapMappingChangedListener() : _bm(NULL) {}

  void set_bitmap(CMBitMap* bm) { _bm = bm; }

  virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
};

class CMBitMap : public CMBitMapRO {
 private:
  CMBitMapMappingChangedListener _listener;

 public:
  static size_t compute_size(size_t heap_size);
  // Returns the number of bytes on the heap between two marks in the bitmap.
  static size_t mark_distance();
  // Returns how many bytes (or bits) of the heap a single byte (or bit) of the
  // mark bitmap corresponds to. This is the same as the mark distance above.
  static size_t heap_map_factor() {
    return mark_distance();
  }

  CMBitMap() : CMBitMapRO(LogMinObjAlignment), _listener() { _listener.set_bitmap(this); }

  // Initializes the underlying BitMap to cover the given area.
  void initialize(MemRegion heap, G1RegionToSpaceMapper* storage);

  // Write marks.
  inline void mark(HeapWord* addr);
  inline void clear(HeapWord* addr);
  inline bool parMark(HeapWord* addr);

  void clearRange(MemRegion mr);

  // Clear the whole mark bitmap.
  void clearAll();
};

// Represents a marking stack used by ConcurrentMarking in the G1 collector.
class CMMarkStack VALUE_OBJ_CLASS_SPEC {
  VirtualSpace _virtual_space;  // Underlying backing store for actual stack
  ConcurrentMark* _cm;
  oop* _base;        // bottom of stack
  jint _index;       // one more than last occupied index
  jint _capacity;    // max #elements
  jint _saved_index; // value of _index saved at start of GC

  bool _overflow;
  bool _should_expand;

 public:
  CMMarkStack(ConcurrentMark* cm);
  ~CMMarkStack();

  bool allocate(size_t capacity);

  // Pushes the first "n" elements of "ptr_arr" on the stack.
  // Locking impl: concurrency is allowed only with
  // "par_push_arr" and/or "par_pop_arr" operations, which use the same
  // locking strategy.
  void par_push_arr(oop* ptr_arr, int n);

  // If it returns false, the array was empty. Otherwise, removes up to "max"
  // elements from the stack, and transfers them to "ptr_arr" in an
  // unspecified order. The actual number transferred is given in "n" ("n
  // == 0" is deliberately redundant with the return value.) Locking impl:
  // concurrency is allowed only with "par_push_arr" and/or "par_pop_arr"
  // operations, which use the same locking strategy.
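  //
  // Illustrative only (buffer size is a hypothetical example): a task
  // typically moves a batch of entries between a local buffer and this
  // stack like so:
  //
  //   oop buffer[64];
  //   stack->par_push_arr(buffer, 64);          // publish a full batch
  //   int n;
  //   if (stack->par_pop_arr(buffer, 64, &n)) {
  //     // process buffer[0 .. n-1]
  //   }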
  bool par_pop_arr(oop* ptr_arr, int max, int* n);

  bool isEmpty()  { return _index == 0; }
  int  maxElems() { return _capacity; }

  bool overflow() { return _overflow; }
  void clear_overflow() { _overflow = false; }

  bool should_expand() const { return _should_expand; }
  void set_should_expand();

  // Expand the stack, typically in response to an overflow condition
  void expand();

  int size() { return _index; }

  void setEmpty() { _index = 0; clear_overflow(); }

  // Record the current index.
  void note_start_of_gc();

  // Make sure that we have not added any entries to the stack during GC.
  void note_end_of_gc();

  // Apply fn to each oop in the mark stack, up to the bound recorded
  // via one of the above "note" functions. The mark stack must not
  // be modified while iterating.
  template<typename Fn> void iterate(Fn fn);
};

class YoungList;

// Root Regions are regions that are not empty at the beginning of a
// marking cycle and which we might collect during an evacuation pause
// while the cycle is active. Given that, during evacuation pauses, we
// do not copy objects that are explicitly marked, what we have to do
// for the root regions is to scan them and mark all objects reachable
// from them. According to the SATB assumptions, we only need to visit
// each object once during marking. So, as long as we finish this scan
// before the next evacuation pause, we can copy the objects from the
// root regions without having to mark them or do anything else to them.
//
// Currently, we only support root region scanning once (at the start
// of the marking cycle) and the root regions are all the survivor
// regions populated during the initial-mark pause.
class CMRootRegions VALUE_OBJ_CLASS_SPEC {
 private:
  YoungList*           _young_list;
  ConcurrentMark*      _cm;

  volatile bool        _scan_in_progress;
  volatile bool        _should_abort;
  HeapRegion* volatile _next_survivor;

 public:
  CMRootRegions();
  // We actually do most of the initialization in this method.
  void init(G1CollectedHeap* g1h, ConcurrentMark* cm);

  // Reset the claiming / scanning of the root regions.
  void prepare_for_scan();

  // Forces get_next() to return NULL so that the iteration aborts early.
  void abort() { _should_abort = true; }

  // Return true if CM threads are actively scanning root regions,
  // false otherwise.
  bool scan_in_progress() { return _scan_in_progress; }

  // Claim the next root region to scan atomically, or return NULL if
  // all have been claimed.
  HeapRegion* claim_next();

  // Flag that we're done with root region scanning and notify anyone
  // who's waiting on it. If aborted is false, assume that all regions
  // have been claimed.
  void scan_finished();

  // If CM threads are still scanning root regions, wait until they
  // are done. Return true if we had to wait, false otherwise.
  bool wait_until_scan_finished();
};
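
// Illustrative only: a root-region scanning worker drives this class
// roughly as follows (scan_root_region is a hypothetical helper; in
// practice scan_finished() is invoked once, after all workers are done):
//
//   HeapRegion* hr = root_regions->claim_next();
//   while (hr != NULL) {
//     scan_root_region(hr);            // mark everything reachable from hr
//     hr = root_regions->claim_next();
//   }
//   root_regions->scan_finished();     // wake up anyone waiting on the scan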

class ConcurrentMarkThread;

class ConcurrentMark: public CHeapObj<mtGC> {
  friend class CMMarkStack;
  friend class ConcurrentMarkThread;
  friend class CMTask;
  friend class CMBitMapClosure;
  friend class CMRemarkTask;
  friend class CMConcurrentMarkingTask;
  friend class G1ParNoteEndTask;
  friend class CalcLiveObjectsClosure;
  friend class G1CMRefProcTaskProxy;
  friend class G1CMRefProcTaskExecutor;
  friend class G1CMKeepAliveAndDrainClosure;
  friend class G1CMDrainMarkingStackClosure;

 protected:
  ConcurrentMarkThread* _cmThread;     // The thread doing the work
  G1CollectedHeap*      _g1h;          // The heap
  uint                  _parallel_marking_threads;     // The number of marking
                                                       // threads we're using
  uint                  _max_parallel_marking_threads; // Max number of marking
                                                       // threads we'll ever use
  double                _sleep_factor; // How much we have to sleep, with
                                       // respect to the work we just did, to
                                       // meet the marking overhead goal
  double                _marking_task_overhead; // Marking target overhead for
                                                // a single task

  FreeRegionList        _cleanup_list;

  // Concurrent marking support structures
  CMBitMap                _markBitMap1;
  CMBitMap                _markBitMap2;
  CMBitMapRO*             _prevMarkBitMap; // Completed mark bitmap
  CMBitMap*               _nextMarkBitMap; // Under-construction mark bitmap

  BitMap                  _region_bm;
  BitMap                  _card_bm;

  // Heap bounds
  HeapWord*               _heap_start;
  HeapWord*               _heap_end;

  // Root region tracking and claiming
  CMRootRegions           _root_regions;

  // For gray objects
  CMMarkStack             _markStack; // Grey objects behind global finger
  HeapWord* volatile      _finger;    // The global finger, region aligned,
                                      // always points to the end of the
                                      // last claimed region

  // Marking tasks
  uint                    _max_worker_id; // Maximum worker id
  uint                    _active_tasks;  // Number of tasks currently active
  CMTask**                _tasks;         // Task queue array (max_worker_id length)
  CMTaskQueueSet*         _task_queues;   // Task queue set
  ParallelTaskTerminator  _terminator;    // For termination

  // Two sync barriers that are used to synchronize tasks when an
  // overflow occurs. The algorithm is the following. All tasks enter
  // the first one to ensure that they have all stopped manipulating
  // the global data structures. After they exit it, they re-initialize
  // their data structures and task 0 re-initializes the global data
  // structures. Then, they enter the second sync barrier. This
  // ensures that no task starts doing work before all data
  // structures (local and global) have been re-initialized. When they
  // exit it, they are free to start working again.
  WorkGangBarrierSync     _first_overflow_barrier_sync;
  WorkGangBarrierSync     _second_overflow_barrier_sync;

  // This is set by any task, when an overflow on the global data
  // structures is detected
  volatile bool           _has_overflown;
  // True: marking is concurrent, false: we're in remark
  volatile bool           _concurrent;
  // Set at the end of a Full GC so that marking aborts
  volatile bool           _has_aborted;

  // Used when remark aborts due to an overflow to indicate that
  // another concurrent marking phase should start
  volatile bool           _restart_for_overflow;
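
  // A rough sketch (illustrative only) of how a task participates in the
  // overflow protocol described above:
  //
  //   if (has_overflown()) {
  //     enter_first_sync_barrier(worker_id);  // everyone stops mutating
  //     // ...task 0 resets global state, each task resets local state...
  //     enter_second_sync_barrier(worker_id); // everyone restarts together
  //   }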

  // This is true from the very start of concurrent marking until the
  // point when all the tasks complete their work. It is really used
  // to determine the period between the end of concurrent marking and
  // the time of remark.
  volatile bool           _concurrent_marking_in_progress;

  // True only inside of markFromRoots().
  // Similar to _concurrent_marking_in_progress but this is set to false
  // when CMConcurrentMarkingTask is finished.
  volatile bool           _concurrent_marking_from_roots;

  // All of these times are in ms
  NumberSeq _init_times;
  NumberSeq _remark_times;
  NumberSeq _remark_mark_times;
  NumberSeq _remark_weak_ref_times;
  NumberSeq _cleanup_times;
  double    _total_counting_time;
  double    _total_rs_scrub_time;

  double*   _accum_task_vtime; // Accumulated task vtime

  WorkGang* _parallel_workers;

  void weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes);
  void weakRefsWork(bool clear_all_soft_refs);

  void swapMarkBitMaps();

  // It resets the global marking data structures, as well as the
  // task local ones; should be called during initial mark.
  void reset();

  // Resets all the marking data structures. Called when we have to restart
  // marking or when marking completes (via set_non_marking_state below).
  void reset_marking_state(bool clear_overflow = true);

  // We do this after we're done with marking so that the marking data
  // structures are initialized to a sensible and predictable state.
  void set_non_marking_state();

  // Called to indicate how many threads are currently active.
  void set_concurrency(uint active_tasks);

  // It should be called to indicate which phase we're in (concurrent
  // mark or remark) and how many threads are currently active.
  void set_concurrency_and_phase(uint active_tasks, bool concurrent);

  // Prints all gathered CM-related statistics
  void print_stats();

  bool cleanup_list_is_empty() {
    return _cleanup_list.is_empty();
  }

  // Accessor methods
  uint parallel_marking_threads() const     { return _parallel_marking_threads; }
  uint max_parallel_marking_threads() const { return _max_parallel_marking_threads; }
  double sleep_factor()                     { return _sleep_factor; }
  double marking_task_overhead()            { return _marking_task_overhead; }

  HeapWord*               finger()       { return _finger; }
  bool                    concurrent()   { return _concurrent; }
  uint                    active_tasks() { return _active_tasks; }
  ParallelTaskTerminator* terminator()   { return &_terminator; }

  // It claims the next available region to be scanned by a marking
  // task/thread. It might return NULL if the next region is empty or
  // we have run out of regions. In the latter case, out_of_regions()
  // determines whether we've really run out of regions or the task
  // should call claim_region() again. This might seem a bit
  // awkward. Originally, the code was written so that claim_region()
  // either successfully returned with a non-empty region or there
  // were no more regions to be claimed. The problem with this was
  // that, in certain circumstances, it iterated over large chunks of
  // the heap finding only empty regions and, while it was working, it
  // was preventing the calling task from calling its regular clock
  // method. So, this way, each task will spend very little time in
  // claim_region() and is allowed to call the regular clock method
  // frequently.
  HeapRegion* claim_region(uint worker_id);
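
  // Illustrative only: the loop a marking task runs on top of this,
  // re-checking out_of_regions() whenever claim_region() returns NULL:
  //
  //   while (!out_of_regions()) {
  //     HeapRegion* hr = claim_region(worker_id);
  //     if (hr != NULL) {
  //       // ... scan hr ...
  //     }
  //     // hr == NULL: the region was empty or none are left; looping
  //     // re-checks, giving the task a chance to call its regular
  //     // clock method in between.
  //   }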

  // It determines whether we've run out of regions to scan. Note that
  // the finger can point past the heap end in case the heap was expanded
  // to satisfy an allocation without doing a GC. This is fine, because all
  // objects in those regions will be considered live anyway because of
  // SATB guarantees (i.e. their TAMS will be equal to bottom).
  bool out_of_regions() { return _finger >= _heap_end; }

  // Returns the task with the given id
  CMTask* task(int id) {
    assert(0 <= id && id < (int) _active_tasks,
           "task id not within active bounds");
    return _tasks[id];
  }

  // Returns the task queue with the given id
  CMTaskQueue* task_queue(int id) {
    assert(0 <= id && id < (int) _active_tasks,
           "task queue id not within active bounds");
    return (CMTaskQueue*) _task_queues->queue(id);
  }

  // Returns the task queue set
  CMTaskQueueSet* task_queues() { return _task_queues; }

  // Access / manipulation of the overflow flag which is set to
  // indicate that the global stack has overflown
  bool has_overflown()       { return _has_overflown; }
  void set_has_overflown()   { _has_overflown = true; }
  void clear_has_overflown() { _has_overflown = false; }
  bool restart_for_overflow() { return _restart_for_overflow; }

  // Methods to enter the two overflow sync barriers
  void enter_first_sync_barrier(uint worker_id);
  void enter_second_sync_barrier(uint worker_id);

  // Start measuring concurrent mark from ConcurrentMark::markFromRoots().
  void register_mark_from_roots_phase_start();

  // End measuring concurrent mark from ConcurrentMark::markFromRoots().
  void register_mark_from_roots_phase_end();

  // Live Data Counting data structures...
  // These data structures are initialized at the start of
  // marking. They are written to while marking is active.
  // They are aggregated during remark; the aggregated values
  // are then used to populate the _region_bm, _card_bm, and
  // the total live bytes, which are then subsequently updated
  // during cleanup.

  // An array of bitmaps (one bit map per task). Each bitmap
  // is used to record the cards spanned by the live objects
  // marked by that task/worker.
  BitMap*  _count_card_bitmaps;

  // Used to record the number of marked live bytes
  // (for each region, by worker thread).
  size_t** _count_marked_bytes;

  // Card index of the bottom of the G1 heap. Used for biasing indices into
  // the card bitmaps.
  intptr_t _heap_bottom_card_num;

  // Set to true when initialization is complete
  bool _completed_initialization;

 public:
  // Manipulation of the global mark stack.
  // The push and pop operations are used by tasks for transfers
  // between task-local queues and the global mark stack, and use
  // locking for concurrency safety.
  bool mark_stack_push(oop* arr, int n) {
    _markStack.par_push_arr(arr, n);
    if (_markStack.overflow()) {
      set_has_overflown();
      return false;
    }
    return true;
  }
  void mark_stack_pop(oop* arr, int max, int* n) {
    _markStack.par_pop_arr(arr, max, n);
  }
  size_t mark_stack_size()                { return _markStack.size(); }
  size_t partial_mark_stack_size_target() { return _markStack.maxElems()/3; }
  bool mark_stack_overflow()              { return _markStack.overflow(); }
  bool mark_stack_empty()                 { return _markStack.isEmpty(); }

  CMRootRegions* root_regions() { return &_root_regions; }

  bool concurrent_marking_in_progress() {
    return _concurrent_marking_in_progress;
  }
  void set_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = true;
  }
  void clear_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = false;
  }

  bool concurrent_marking_from_roots() const {
    return _concurrent_marking_from_roots;
  }

  void update_accum_task_vtime(int i, double vtime) {
    _accum_task_vtime[i] += vtime;
  }

  double all_task_accum_vtime() {
    double ret = 0.0;
    for (uint i = 0; i < _max_worker_id; ++i)
      ret += _accum_task_vtime[i];
    return ret;
  }

  // Attempts to steal an object from the task queues of other tasks
  bool try_stealing(uint worker_id, int* hash_seed, oop& obj);

  ConcurrentMark(G1CollectedHeap* g1h,
                 G1RegionToSpaceMapper* prev_bitmap_storage,
                 G1RegionToSpaceMapper* next_bitmap_storage);
  ~ConcurrentMark();

  ConcurrentMarkThread* cmThread() { return _cmThread; }

  CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; }
  CMBitMap*   nextMarkBitMap() const { return _nextMarkBitMap; }

  // Returns the number of GC threads to be used in a concurrent
  // phase based on the number of GC threads being used in a STW
  // phase.
  uint scale_parallel_threads(uint n_par_threads);

  // Calculates the number of GC threads to be used in a concurrent phase.
  uint calc_parallel_marking_threads();

  // The following three are interaction between CM and
  // G1CollectedHeap

  // This notifies CM that a root during initial-mark needs to be
  // grayed. It is MT-safe. word_size is the size of the object in
  // words. It is passed explicitly as sometimes we cannot calculate
  // it from the given object because it might be in an inconsistent
  // state (e.g., in to-space and being copied). So the caller is
  // responsible for dealing with this issue (e.g., get the size from
  // the from-space image when the to-space image might be
  // inconsistent) and always passing the size. hr is the region that
  // contains the object and it's passed optionally from callers who
  // might already have it (no point in recalculating it).
  inline void grayRoot(oop obj,
                       size_t word_size,
                       uint worker_id,
                       HeapRegion* hr = NULL);
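
  // Illustrative only: an evacuation-pause caller would typically do
  // something like the following (the variable names are hypothetical):
  //
  //   // The to-space copy may still be under construction, so take the
  //   // size from the stable from-space image and pass it explicitly.
  //   size_t sz = from_space_obj->size();
  //   _cm->grayRoot(to_space_obj, sz, worker_id, hr);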

  // Clear the next marking bitmap (will be called concurrently).
  void clearNextBitmap();

  // Return whether the next mark bitmap has no marks set. To be used for assertions
  // only. Will not yield to pause requests.
  bool nextMarkBitmapIsClear();

  // These two do the work that needs to be done before and after the
  // initial root checkpoint. Since this checkpoint can be done at two
  // different points (i.e. an explicit pause or piggy-backed on a
  // young collection), it's nice to be able to easily share the
  // pre/post code. It might be the case that we can put everything in
  // the post method. TP
  void checkpointRootsInitialPre();
  void checkpointRootsInitialPost();

  // Scan all the root regions and mark everything reachable from
  // them.
  void scanRootRegions();

  // Scan a single root region and mark everything reachable from it.
  void scanRootRegion(HeapRegion* hr, uint worker_id);

  // Do concurrent phase of marking, to a tentative transitive closure.
  void markFromRoots();

  void checkpointRootsFinal(bool clear_all_soft_refs);
  void checkpointRootsFinalWork();
  void cleanup();
  void completeCleanup();

  // Mark in the previous bitmap. NB: this is usually read-only, so use
  // this carefully!
  inline void markPrev(oop p);

  // Clears marks for all objects in the given range, for the prev
  // bitmap. NB: the previous bitmap is usually
  // read-only, so use this carefully!
  void clearRangePrevBitmap(MemRegion mr);

  // Notify data structures that a GC has started.
  void note_start_of_gc() {
    _markStack.note_start_of_gc();
  }

  // Notify data structures that a GC is finished.
  void note_end_of_gc() {
    _markStack.note_end_of_gc();
  }

  // Verify that there are no CSet oops on the stacks (taskqueues /
  // global mark stack) and fingers (global / per-task).
  // If marking is not in progress, it's a no-op.
  void verify_no_cset_oops() PRODUCT_RETURN;

  bool isPrevMarked(oop p) const {
    assert(p != NULL && p->is_oop(), "expected an oop");
    HeapWord* addr = (HeapWord*)p;
    assert(addr >= _prevMarkBitMap->startWord() &&
           addr < _prevMarkBitMap->endWord(), "in a region");

    return _prevMarkBitMap->isMarked(addr);
  }

  inline bool do_yield_check(uint worker_i = 0);

  // Called to abort the marking cycle after a Full GC takes place.
  void abort();

  bool has_aborted() { return _has_aborted; }

  void print_summary_info();

  void print_worker_threads_on(outputStream* st) const;

  void print_on_error(outputStream* st) const;

  // Liveness counting

  // Utility routine to set an exclusive range of cards on the given
  // card liveness bitmap
  inline void set_card_bitmap_range(BitMap* card_bm,
                                    BitMap::idx_t start_idx,
                                    BitMap::idx_t end_idx,
                                    bool is_par);

  // Returns the card number of the bottom of the G1 heap.
  // Used in biasing indices into accounting card bitmaps.
  intptr_t heap_bottom_card_num() const {
    return _heap_bottom_card_num;
  }

  // Returns the card bitmap for a given task or worker id.
  BitMap* count_card_bitmap_for(uint worker_id) {
    assert(worker_id < _max_worker_id, "oob");
    assert(_count_card_bitmaps != NULL, "uninitialized");
    BitMap* task_card_bm = &_count_card_bitmaps[worker_id];
    assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
    return task_card_bm;
  }

  // Returns the array containing the marked bytes for each region,
  // for the given worker or task id.
  size_t* count_marked_bytes_array_for(uint worker_id) {
    assert(worker_id < _max_worker_id, "oob");
    assert(_count_marked_bytes != NULL, "uninitialized");
    size_t* marked_bytes_array = _count_marked_bytes[worker_id];
    assert(marked_bytes_array != NULL, "uninitialized");
    return marked_bytes_array;
  }
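
  // Illustrative only: with the biasing above, the counting-bitmap index
  // for a heap address addr would be computed roughly as
  //
  //   intptr_t card_num = intptr_t(uintptr_t(addr) >> CardTableModRefBS::card_shift);
  //   BitMap::idx_t idx = card_num - heap_bottom_card_num();
  //
  // (see card_bitmap_index_for below, which encapsulates this).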

  // Returns the index in the liveness accounting card table bitmap
  // for the given address
  inline BitMap::idx_t card_bitmap_index_for(HeapWord* addr);

  // Counts the size of the given memory region in the given
  // marked_bytes array slot for the given HeapRegion.
  // Sets the bits in the given card bitmap that are associated with the
  // cards that are spanned by the memory region.
  inline void count_region(MemRegion mr,
                           HeapRegion* hr,
                           size_t* marked_bytes_array,
                           BitMap* task_card_bm);

  // Counts the given object in the given task/worker counting
  // data structures.
  inline void count_object(oop obj,
                           HeapRegion* hr,
                           size_t* marked_bytes_array,
                           BitMap* task_card_bm,
                           size_t word_size);

  // Attempts to mark the given object and, if successful, counts
  // the object in the given task/worker counting structures.
  inline bool par_mark_and_count(oop obj,
                                 HeapRegion* hr,
                                 size_t* marked_bytes_array,
                                 BitMap* task_card_bm);

  // Attempts to mark the given object and, if successful, counts
  // the object in the task/worker counting structures for the
  // given worker id.
  inline bool par_mark_and_count(oop obj,
                                 size_t word_size,
                                 HeapRegion* hr,
                                 uint worker_id);

  // Returns true if initialization was successfully completed.
  bool completed_initialization() const {
    return _completed_initialization;
  }

 protected:
  // Clear all the per-task bitmaps and arrays used to store the
  // counting data.
  void clear_all_count_data();

  // Aggregates the counting data for each worker/task
  // that was constructed while marking. Also sets
  // the amount of marked bytes for each region and
  // the top at concurrent mark count.
  void aggregate_count_data();

  // Verification routine
  void verify_count_data();
};
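
// Illustrative only: the liveness-counting flow a marking worker follows
// with the structures above (names per the declarations in ConcurrentMark):
//
//   size_t* marked_bytes = cm->count_marked_bytes_array_for(worker_id);
//   BitMap* card_bm      = cm->count_card_bitmap_for(worker_id);
//   if (cm->par_mark_and_count(obj, hr, marked_bytes, card_bm)) {
//     // obj was newly marked; its size and card span have been recorded
//     // and will be aggregated during remark.
//   }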

// A class representing a marking task.
class CMTask : public TerminatorTerminator {
 private:
  enum PrivateConstants {
    // the regular clock call is called once the number of scanned
    // words reaches this limit
    words_scanned_period       = 12*1024,
    // the regular clock call is called once the number of visited
    // references reaches this limit
    refs_reached_period        = 384,
    // initial value for the hash seed, used in the work stealing code
    init_hash_seed             = 17,
    // how many entries will be transferred between global stack and
    // local queues
    global_stack_transfer_size = 16
  };

  uint                        _worker_id;
  G1CollectedHeap*            _g1h;
  ConcurrentMark*             _cm;
  CMBitMap*                   _nextMarkBitMap;
  // the task queue of this task
  CMTaskQueue*                _task_queue;
 private:
  // the task queue set---needed for stealing
  CMTaskQueueSet*             _task_queues;
  // indicates whether the task has been claimed---this is only for
  // debugging purposes
  bool                        _claimed;

  // number of calls to this task
  int                         _calls;

  // when the virtual timer reaches this time, the marking step should
  // exit
  double                      _time_target_ms;
  // the start time of the current marking step
  double                      _start_time_ms;

  // the oop closure used for iterations over oops
  G1CMOopClosure*             _cm_oop_closure;

  // the region this task is scanning, NULL if we're not scanning any
  HeapRegion*                 _curr_region;
  // the local finger of this task, NULL if we're not scanning a region
  HeapWord*                   _finger;
  // limit of the region this task is scanning, NULL if we're not scanning one
  HeapWord*                   _region_limit;

  // the number of words this task has scanned
  size_t                      _words_scanned;
  // When _words_scanned reaches this limit, the regular clock is
  // called. Notice that this might be decreased under certain
  // circumstances (i.e. when we believe that we did an expensive
  // operation).
  size_t                      _words_scanned_limit;
  // the initial value of _words_scanned_limit (i.e. what it was
  // before it was decreased).
  size_t                      _real_words_scanned_limit;

  // the number of references this task has visited
  size_t                      _refs_reached;
  // When _refs_reached reaches this limit, the regular clock is
  // called. Notice that this might be decreased under certain
  // circumstances (i.e. when we believe that we did an expensive
  // operation).
  size_t                      _refs_reached_limit;
  // the initial value of _refs_reached_limit (i.e. what it was before
  // it was decreased).
  size_t                      _real_refs_reached_limit;

  // used by the work stealing code
  int                         _hash_seed;
  // if this is true, then the task has aborted for some reason
  bool                        _has_aborted;
  // set when the task aborts because it has met its time quota
  bool                        _has_timed_out;
  // true when we're draining SATB buffers; this avoids the task
  // aborting due to SATB buffers being available (as we're already
  // dealing with them)
  bool                        _draining_satb_buffers;

  // number sequence of past step times
  NumberSeq                   _step_times_ms;
  // elapsed time of this task
  double                      _elapsed_time_ms;
  // termination time of this task
  double                      _termination_time_ms;
  // when this task got into the termination protocol
  double                      _termination_start_time_ms;

  // true when the task is in a concurrent phase, false when it is
  // in the remark phase (so, in the latter case, we do not have to
  // check all the things that we have to check during the concurrent
  // phase, i.e. SATB buffer availability...)
  bool                        _concurrent;

  TruncatedSeq                _marking_step_diffs_ms;

  // Counting data structures. Embedding the task's marked_bytes_array
  // and card bitmap into the actual task saves having to go through
  // the ConcurrentMark object.
  size_t*                     _marked_bytes_array;
  BitMap*                     _card_bm;

  // it updates the local fields after this task has claimed
  // a new region to scan
  void setup_for_region(HeapRegion* hr);
  // it brings up-to-date the limit of the region
  void update_region_limit();

  // called when either the words scanned or the refs visited limit
  // has been reached
  void reached_limit();
  // recalculates the words scanned and refs visited limits
  void recalculate_limits();
  // decreases the words scanned and refs visited limits when we reach
  // an expensive operation
  void decrease_limits();
  // it checks whether the words scanned or refs visited reached their
  // respective limit and calls reached_limit() if they have
  void check_limits() {
    if (_words_scanned >= _words_scanned_limit ||
        _refs_reached >= _refs_reached_limit) {
      reached_limit();
    }
  }
  // this is supposed to be called regularly during a marking step as
  // it checks a bunch of conditions that might cause the marking step
  // to abort
  void regular_clock_call();
  bool concurrent() { return _concurrent; }

  // Test whether obj might have already been passed over by the
  // mark bitmap scan, and so needs to be pushed onto the mark stack.
  bool is_below_finger(oop obj, HeapWord* global_finger) const;

  template<bool scan> void process_grey_object(oop obj);

 public:
  // It resets the task; it should be called right at the beginning of
  // a marking phase.
  void reset(CMBitMap* _nextMarkBitMap);
  // it clears all the fields that correspond to a claimed region.
  void clear_region_fields();

  void set_concurrent(bool concurrent) { _concurrent = concurrent; }

  // The main method of this class which performs a marking step
  // trying not to exceed the given duration. However, it might exit
  // prematurely, according to some conditions (i.e. SATB buffers are
  // available for processing).
  void do_marking_step(double target_ms,
                       bool do_termination,
                       bool is_serial);

  // These two calls start and stop the timer
  void record_start_time() {
    _elapsed_time_ms = os::elapsedTime() * 1000.0;
  }
  void record_end_time() {
    _elapsed_time_ms = os::elapsedTime() * 1000.0 - _elapsed_time_ms;
  }

  // returns the worker ID associated with this task.
  uint worker_id() { return _worker_id; }

  // From TerminatorTerminator. It determines whether this task should
  // exit the termination protocol after it's entered it.
  virtual bool should_exit_termination();

  // Resets the local region fields after a task has finished scanning a
  // region; or when they have become stale as a result of the region
  // being evacuated.
  void giveup_current_region();

  HeapWord* finger() { return _finger; }

  bool has_aborted()       { return _has_aborted; }
  void set_has_aborted()   { _has_aborted = true; }
  void clear_has_aborted() { _has_aborted = false; }
  bool has_timed_out()     { return _has_timed_out; }
  bool claimed()           { return _claimed; }

  void set_cm_oop_closure(G1CMOopClosure* cm_oop_closure);

  // Increment the number of references this task has visited.
  void increment_refs_reached() { ++_refs_reached; }
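
  // Illustrative only: how a concurrent marking worker might drive a task
  // (the 10ms target is just an example value):
  //
  //   task->record_start_time();
  //   do {
  //     task->do_marking_step(10.0 /* target_ms */,
  //                           true /* do_termination */,
  //                           false /* is_serial */);
  //     // a step that aborts (overflow, yield, time quota) is retried
  //     // unless marking as a whole has been aborted
  //   } while (task->has_aborted() && !_cm->has_aborted());
  //   task->record_end_time();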

  // Grey the object by marking it. If not already marked, push it on
  // the local queue if below the finger.
  // Precondition: obj is in region.
  // Precondition: obj is below region's NTAMS.
  inline void make_reference_grey(oop obj, HeapRegion* region);

  // Grey the object (by calling make_reference_grey) if required,
  // e.g. obj is below its containing region's NTAMS.
  // Precondition: obj is a valid heap object.
  inline void deal_with_reference(oop obj);

  // It scans an object and visits its children.
  inline void scan_object(oop obj);

  // It pushes an object on the local queue.
  inline void push(oop obj);

  // These two move entries to/from the global stack.
  void move_entries_to_global_stack();
  void get_entries_from_global_stack();

  // It pops and scans objects from the local queue. If partially is
  // true, then it stops when the queue size reaches a given limit. If
  // partially is false, then it stops when the queue is empty.
  void drain_local_queue(bool partially);
  // It moves entries from the global stack to the local queue and
  // drains the local queue. If partially is true, then it stops when
  // both the global stack and the local queue reach a given size. If
  // partially is false, it tries to empty them totally.
  void drain_global_stack(bool partially);
  // It keeps picking SATB buffers and processing them until no SATB
  // buffers are available.
  void drain_satb_buffers();

  // moves the local finger to a new location
  inline void move_finger_to(HeapWord* new_finger) {
    assert(new_finger >= _finger && new_finger < _region_limit, "invariant");
    _finger = new_finger;
  }

  CMTask(uint worker_id,
         ConcurrentMark *cm,
         size_t* marked_bytes,
         BitMap* card_bm,
         CMTaskQueue* task_queue,
         CMTaskQueueSet* task_queues);

  // it prints statistics associated with this task
  void print_stats();
};

// Class that's used to print out per-region liveness
// information. It's currently used at the end of marking and also
// after we sort the old regions at the end of the cleanup operation.
class G1PrintRegionLivenessInfoClosure: public HeapRegionClosure {
 private:
  // Accumulators for these values.
  size_t _total_used_bytes;
  size_t _total_capacity_bytes;
  size_t _total_prev_live_bytes;
  size_t _total_next_live_bytes;

  // These are set up when we come across a "starts humongous" region
  // (as this is where most of this information is stored, not in the
  // subsequent "continues humongous" regions). After that, for every
  // region in a given humongous region series we deduce the right
  // values for it by simply subtracting the appropriate amount from
  // these fields. All these values should reach 0 after we've visited
  // the last region in the series.
  size_t _hum_used_bytes;
  size_t _hum_capacity_bytes;
  size_t _hum_prev_live_bytes;
  size_t _hum_next_live_bytes;

  // Accumulator for the remembered set size
  size_t _total_remset_bytes;

  // Accumulator for strong code roots memory size
  size_t _total_strong_code_roots_bytes;

  static double perc(size_t val, size_t total) {
    if (total == 0) {
      return 0.0;
    } else {
      return 100.0 * ((double) val / (double) total);
    }
  }

  static double bytes_to_mb(size_t val) {
    return (double) val / (double) M;
  }

  // See the .cpp file.
  size_t get_hum_bytes(size_t* hum_bytes);
  void get_hum_bytes(size_t* used_bytes, size_t* capacity_bytes,
                     size_t* prev_live_bytes, size_t* next_live_bytes);

 public:
  // The header and footer are printed in the constructor and
  // destructor respectively.
  G1PrintRegionLivenessInfoClosure(const char* phase_name);
  virtual bool doHeapRegion(HeapRegion* r);
  ~G1PrintRegionLivenessInfoClosure();
};

#endif // SHARE_VM_GC_G1_CONCURRENTMARK_HPP