/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP

#include "gc_implementation/g1/heapRegionSets.hpp"
#include "utilities/taskqueue.hpp"

class G1CollectedHeap;
class CMTask;
typedef GenericTaskQueue<oop, mtGC>            CMTaskQueue;
typedef GenericTaskQueueSet<CMTaskQueue, mtGC> CMTaskQueueSet;

// Closure used by CM during concurrent reference discovery
// and reference processing (during remarking) to determine
// if a particular object is alive. It is primarily used
// to determine if referents of discovered reference objects
// are alive. An instance is also embedded into the
// reference processor as the _is_alive_non_header field.
class G1CMIsAliveClosure: public BoolObjectClosure {
  G1CollectedHeap* _g1;
 public:
  G1CMIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) { }

  void do_object(oop obj) {
    ShouldNotCallThis();
  }
  bool do_object_b(oop obj);
};

// A generic CM bit map. This is essentially a wrapper around the BitMap
// class, with one bit per (1 << _shifter) HeapWords.

class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
 protected:
  HeapWord*    _bmStartWord;   // base address of range covered by map
  size_t       _bmWordSize;    // map size (in #HeapWords covered)
  const int    _shifter;       // map to char or bit
  VirtualSpace _virtual_space; // underlying the bit map
  BitMap       _bm;            // the bit map itself

 public:
  // constructor
  CMBitMapRO(int shifter);

  enum { do_yield = true };

  // inquiries
  HeapWord* startWord()   const { return _bmStartWord; }
  size_t    sizeInWords() const { return _bmWordSize;  }
  // the following is one past the last word in space
  HeapWord* endWord()     const { return _bmStartWord + _bmWordSize; }

  // read marks

  bool isMarked(HeapWord* addr) const {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    return _bm.at(heapWordToOffset(addr));
  }

  // iteration
  inline bool iterate(BitMapClosure* cl, MemRegion mr);
  inline bool iterate(BitMapClosure* cl);

  // Return the address corresponding to the next marked bit at or after
  // "addr", and before "limit", if "limit" is non-NULL. If there is no
  // such bit, returns "limit" if that is non-NULL, or else "endWord()".
  HeapWord* getNextMarkedWordAddress(HeapWord* addr,
                                     HeapWord* limit = NULL) const;
  // Return the address corresponding to the next unmarked bit at or after
  // "addr", and before "limit", if "limit" is non-NULL. If there is no
  // such bit, returns "limit" if that is non-NULL, or else "endWord()".
  HeapWord* getNextUnmarkedWordAddress(HeapWord* addr,
                                       HeapWord* limit = NULL) const;

  // conversion utilities
  // XXX Fix these so that offsets are size_t's...
  HeapWord* offsetToHeapWord(size_t offset) const {
    return _bmStartWord + (offset << _shifter);
  }
  size_t heapWordToOffset(HeapWord* addr) const {
    return pointer_delta(addr, _bmStartWord) >> _shifter;
  }
  int heapWordDiffToOffsetDiff(size_t diff) const;

  HeapWord* nextObject(HeapWord* addr) {
    oop obj = (oop) addr;
    return addr + obj->size();
  }

  // debugging
  NOT_PRODUCT(bool covers(ReservedSpace rs) const;)
};

class CMBitMap : public CMBitMapRO {

 public:
  // constructor
  CMBitMap(int shifter) :
    CMBitMapRO(shifter) {}

  // Allocates the backing store for the marking bitmap
  bool allocate(ReservedSpace heap_rs);

  // write marks
  void mark(HeapWord* addr) {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    _bm.set_bit(heapWordToOffset(addr));
  }
  void clear(HeapWord* addr) {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    _bm.clear_bit(heapWordToOffset(addr));
  }
  bool parMark(HeapWord* addr) {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    return _bm.par_set_bit(heapWordToOffset(addr));
  }
  bool parClear(HeapWord* addr) {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    return _bm.par_clear_bit(heapWordToOffset(addr));
  }
  void markRange(MemRegion mr);
  void clearAll();
  void clearRange(MemRegion mr);

  // Starting at the bit corresponding to "addr" (inclusive), find the next
  // "1" bit, if any. This bit starts some run of consecutive "1"'s; find
  // the end of this run (stopping at "end_addr"). Return the MemRegion
  // covering from the start of the region corresponding to the first bit
  // of the run to the end of the region corresponding to the last bit of
  // the run. If there is no "1" bit at or after "addr", return an empty
  // MemRegion.
  MemRegion getAndClearMarkedRegion(HeapWord* addr, HeapWord* end_addr);
};
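
// Illustrative usage sketch (not part of this header's API): how a marking
// thread might consult and update a CMBitMap using the accessors declared
// above. `bm` is assumed to be a CMBitMap covering the heap and `addr` an
// address inside the covered range.
//
//   if (!bm->isMarked(addr)) {
//     // parMark() sets the bit atomically (CAS-based), so concurrent
//     // markers may race on the same address; typically only the thread
//     // that actually set the bit sees a true result.
//     if (bm->parMark(addr)) {
//       // ... this thread is now responsible for scanning/pushing the object
//     }
//   }
//
//   // The offset/address conversions are inverses of each other:
//   // bm->offsetToHeapWord(bm->heapWordToOffset(addr)) yields addr rounded
//   // down to the (1 << _shifter) HeapWord granule that contains it.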

// Represents a marking stack used by ConcurrentMarking in the G1 collector.
class CMMarkStack VALUE_OBJ_CLASS_SPEC {
  VirtualSpace _virtual_space;   // Underlying backing store for actual stack
  ConcurrentMark* _cm;
  oop* _base;                    // bottom of stack
  jint _index;                   // one more than last occupied index
  jint _capacity;                // max #elements
  jint _saved_index;             // value of _index saved at start of GC
  NOT_PRODUCT(jint _max_depth;)  // max depth plumbed during run

  bool  _overflow;
  bool  _should_expand;
  DEBUG_ONLY(bool _drain_in_progress;)
  DEBUG_ONLY(bool _drain_in_progress_yields;)

 public:
  CMMarkStack(ConcurrentMark* cm);
  ~CMMarkStack();

#ifndef PRODUCT
  jint max_depth() const {
    return _max_depth;
  }
#endif

  bool allocate(size_t capacity);

  oop pop() {
    if (!isEmpty()) {
      return _base[--_index];
    }
    return NULL;
  }

  // If overflow happens, don't do the push, and record the overflow.
  // *Requires* that "ptr" is already marked.
  void push(oop ptr) {
    if (isFull()) {
      // Record overflow.
      _overflow = true;
      return;
    } else {
      _base[_index++] = ptr;
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
    }
  }
  // Non-block impl. Note: concurrency is allowed only with other
  // "par_push" operations, not with "pop" or "drain". We would need
  // parallel versions of them if such concurrency was desired.
  void par_push(oop ptr);

  // Pushes the first "n" elements of "ptr_arr" on the stack.
  // Non-block impl. Note: concurrency is allowed only with other
  // "par_adjoin_arr" or "push" operations, not with "pop" or "drain".
  void par_adjoin_arr(oop* ptr_arr, int n);

  // Pushes the first "n" elements of "ptr_arr" on the stack.
  // Locking impl: concurrency is allowed only with
  // "par_push_arr" and/or "par_pop_arr" operations, which use the same
  // locking strategy.
  void par_push_arr(oop* ptr_arr, int n);

  // If this returns false, the array was empty. Otherwise, removes up to
  // "max" elements from the stack, and transfers them to "ptr_arr" in an
  // unspecified order. The actual number transferred is given in "n" ("n
  // == 0" is deliberately redundant with the return value.) Locking impl:
  // concurrency is allowed only with "par_push_arr" and/or "par_pop_arr"
  // operations, which use the same locking strategy.
  bool par_pop_arr(oop* ptr_arr, int max, int* n);

  // Drain the mark stack, applying the given closure to all fields of
  // objects on the stack. (That is, continue until the stack is empty,
  // even if closure applications add entries to the stack.) The "bm"
  // argument, if non-null, may be used to verify that only marked objects
  // are on the mark stack. If "yield_after" is "true", then the
  // concurrent marker performing the drain offers to yield after
  // processing each object. If a yield occurs, stops the drain operation
  // and returns false. Otherwise, returns true.
  template<class OopClosureClass>
  bool drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after = false);

  bool isEmpty()  { return _index == 0; }
  bool isFull()   { return _index == _capacity; }
  int  maxElems() { return _capacity; }

  bool overflow() { return _overflow; }
  void clear_overflow() { _overflow = false; }

  bool should_expand() const { return _should_expand; }
  void set_should_expand();

  // Expand the stack, typically in response to an overflow condition
  void expand();

  int size() { return _index; }

  void setEmpty() { _index = 0; clear_overflow(); }

  // Record the current index.
  void note_start_of_gc();

  // Make sure that we have not added any entries to the stack during GC.
  void note_end_of_gc();

  // iterate over the oops in the mark stack, up to the bound recorded via
  // the call above.
  void oops_do(OopClosure* f);
};
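
// Illustrative sketch only (not part of this header): the overflow/expand
// protocol suggested by the declarations above. A pusher records overflow
// rather than blocking; whoever notices the overflow later can request an
// expansion before marking is attempted again. `stack` is assumed to point
// at a CMMarkStack.
//
//   stack->par_push(obj);           // may silently set the overflow flag
//   if (stack->overflow()) {
//     stack->set_should_expand();   // remember to grow the backing store
//     // ... abort and restart marking, as ConcurrentMark does ...
//   }
//   ...
//   if (stack->should_expand()) {
//     stack->expand();              // grow before marking restarts
//   }
//   stack->setEmpty();              // also clears the overflow flag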

class ForceOverflowSettings VALUE_OBJ_CLASS_SPEC {
 private:
#ifndef PRODUCT
  uintx _num_remaining;
  bool  _force;
#endif // !defined(PRODUCT)

 public:
  void init() PRODUCT_RETURN;
  void update() PRODUCT_RETURN;
  bool should_force() PRODUCT_RETURN_( return false; );
};

// this will enable a variety of different statistics per GC task
#define _MARKING_STATS_ 0
// this will enable the higher verbose levels
#define _MARKING_VERBOSE_ 0

#if _MARKING_STATS_
#define statsOnly(statement)  \
do {                          \
  statement ;                 \
} while (0)
#else // _MARKING_STATS_
#define statsOnly(statement)  \
do {                          \
} while (0)
#endif // _MARKING_STATS_

typedef enum {
  no_verbose  = 0,   // verbose turned off
  stats_verbose,     // only prints stats at the end of marking
  low_verbose,       // low verbose, mostly per region and per major event
  medium_verbose,    // a bit more detailed than low
  high_verbose       // per object verbose
} CMVerboseLevel;

class YoungList;

// Root Regions are regions that are not empty at the beginning of a
// marking cycle and which we might collect during an evacuation pause
// while the cycle is active. Given that, during evacuation pauses, we
// do not copy objects that are explicitly marked, what we have to do
// for the root regions is to scan them and mark all objects reachable
// from them. According to the SATB assumptions, we only need to visit
// each object once during marking. So, as long as we finish this scan
// before the next evacuation pause, we can copy the objects from the
// root regions without having to mark them or do anything else to them.
//
// Currently, we only support root region scanning once (at the start
// of the marking cycle) and the root regions are all the survivor
// regions populated during the initial-mark pause.
class CMRootRegions VALUE_OBJ_CLASS_SPEC {
 private:
  YoungList*           _young_list;
  ConcurrentMark*      _cm;

  volatile bool        _scan_in_progress;
  volatile bool        _should_abort;
  HeapRegion* volatile _next_survivor;

 public:
  CMRootRegions();
  // We actually do most of the initialization in this method.
  void init(G1CollectedHeap* g1h, ConcurrentMark* cm);

  // Reset the claiming / scanning of the root regions.
  void prepare_for_scan();

  // Forces claim_next() to return NULL so that the iteration aborts early.
  void abort() { _should_abort = true; }

  // Returns true if CM threads are actively scanning root regions,
  // false otherwise.
  bool scan_in_progress() { return _scan_in_progress; }

  // Claim the next root region to scan atomically, or return NULL if
  // all have been claimed.
  HeapRegion* claim_next();

  // Flag that we're done with root region scanning and notify anyone
  // who's waiting on it. If aborted is false, assume that all regions
  // have been claimed.
  void scan_finished();

  // If CM threads are still scanning root regions, wait until they
  // are done. Return true if we had to wait, false otherwise.
  bool wait_until_scan_finished();
};
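
// Illustrative sketch only (not part of this header): the claim/scan/finish
// protocol implied by the declarations above. Each marking worker keeps
// claiming root regions until claim_next() returns NULL; once all regions
// have been scanned, one caller flags completion with scan_finished().
//
//   CMRootRegions* root_regions = ...;  // e.g. ConcurrentMark::root_regions()
//   HeapRegion* hr = root_regions->claim_next();
//   while (hr != NULL) {
//     // mark everything reachable from objects in hr (see
//     // ConcurrentMark::scanRootRegion() further down)
//     hr = root_regions->claim_next();
//   }
//   ...
//   root_regions->scan_finished();      // notifies wait_until_scan_finished()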

class ConcurrentMarkThread;

class ConcurrentMark: public CHeapObj<mtGC> {
  friend class CMMarkStack;
  friend class ConcurrentMarkThread;
  friend class CMTask;
  friend class CMBitMapClosure;
  friend class CMGlobalObjectClosure;
  friend class CMRemarkTask;
  friend class CMConcurrentMarkingTask;
  friend class G1ParNoteEndTask;
  friend class CalcLiveObjectsClosure;
  friend class G1CMRefProcTaskProxy;
  friend class G1CMRefProcTaskExecutor;
  friend class G1CMKeepAliveAndDrainClosure;
  friend class G1CMDrainMarkingStackClosure;

 protected:
  ConcurrentMarkThread* _cmThread;     // the thread doing the work
  G1CollectedHeap*      _g1h;          // the heap
  uint                  _parallel_marking_threads; // the number of marking
                                                   // threads we're using
  uint                  _max_parallel_marking_threads; // max number of marking
                                                       // threads we'll ever use
  double                _sleep_factor; // how much we have to sleep, with
                                       // respect to the work we just did, to
                                       // meet the marking overhead goal
  double                _marking_task_overhead; // marking target overhead for
                                                // a single task

  // same as the two above, but for the cleanup task
  double                _cleanup_sleep_factor;
  double                _cleanup_task_overhead;

  FreeRegionList        _cleanup_list;

  // Concurrent marking support structures
  CMBitMap                _markBitMap1;
  CMBitMap                _markBitMap2;
  CMBitMapRO*             _prevMarkBitMap; // completed mark bitmap
  CMBitMap*               _nextMarkBitMap; // under-construction mark bitmap

  BitMap                  _region_bm;
  BitMap                  _card_bm;

  // Heap bounds
  HeapWord*               _heap_start;
  HeapWord*               _heap_end;

  // Root region tracking and claiming.
  CMRootRegions           _root_regions;

  // For gray objects
  CMMarkStack             _markStack; // Grey objects behind global finger.
  HeapWord* volatile      _finger;    // the global finger, region aligned,
                                      // always points to the end of the
                                      // last claimed region

  // marking tasks
  uint                    _max_worker_id;// maximum worker id
  uint                    _active_tasks; // number of currently active tasks
  CMTask**                _tasks;        // task array (max_worker_id length)
  CMTaskQueueSet*         _task_queues;  // task queue set
  ParallelTaskTerminator  _terminator;   // for termination

  // Two sync barriers that are used to synchronise tasks when an
  // overflow occurs. The algorithm is the following. All tasks enter
  // the first one to ensure that they have all stopped manipulating
  // the global data structures. After they exit it, they re-initialise
  // their data structures and task 0 re-initialises the global data
  // structures. Then, they enter the second sync barrier. This
  // ensures that no task starts doing work before all data
  // structures (local and global) have been re-initialised.
  // When they exit it, they are free to start working again.
  WorkGangBarrierSync     _first_overflow_barrier_sync;
  WorkGangBarrierSync     _second_overflow_barrier_sync;

  // this is set by any task when an overflow on the global data
  // structures is detected.
  volatile bool           _has_overflown;
  // true: marking is concurrent, false: we're in remark
  volatile bool           _concurrent;
  // set at the end of a Full GC so that marking aborts
  volatile bool           _has_aborted;

  // used when remark aborts due to an overflow to indicate that
  // another concurrent marking phase should start
  volatile bool           _restart_for_overflow;

  // This is true from the very start of concurrent marking until the
  // point when all the tasks complete their work. It is really used
  // to determine the points between the end of concurrent marking and
  // time of remark.
  volatile bool           _concurrent_marking_in_progress;

  // verbose level
  CMVerboseLevel          _verbose_level;

  // All of these times are in ms.
  NumberSeq _init_times;
  NumberSeq _remark_times;
  NumberSeq _remark_mark_times;
  NumberSeq _remark_weak_ref_times;
  NumberSeq _cleanup_times;
  double    _total_counting_time;
  double    _total_rs_scrub_time;

  double*   _accum_task_vtime;   // accumulated task vtime

  FlexibleWorkGang* _parallel_workers;

  ForceOverflowSettings _force_overflow_conc;
  ForceOverflowSettings _force_overflow_stw;

  void weakRefsWork(bool clear_all_soft_refs);

  void swapMarkBitMaps();

  // It resets the global marking data structures, as well as the
  // task local ones; should be called during initial mark.
  void reset();

  // Resets all the marking data structures. Called when we have to restart
  // marking or when marking completes (via set_non_marking_state below).
  void reset_marking_state(bool clear_overflow = true);

  // We do this after we're done with marking so that the marking data
  // structures are initialised to a sensible and predictable state.
  void set_non_marking_state();

  // It should be called to indicate which phase we're in (concurrent
  // mark or remark) and how many threads are currently active.
  void set_phase(uint active_tasks, bool concurrent);

  // prints all gathered CM-related statistics
  void print_stats();

  bool cleanup_list_is_empty() {
    return _cleanup_list.is_empty();
  }

  // accessor methods
  uint parallel_marking_threads() const     { return _parallel_marking_threads; }
  uint max_parallel_marking_threads() const { return _max_parallel_marking_threads; }
  double sleep_factor()                     { return _sleep_factor; }
  double marking_task_overhead()            { return _marking_task_overhead; }
  double cleanup_sleep_factor()             { return _cleanup_sleep_factor; }
  double cleanup_task_overhead()            { return _cleanup_task_overhead; }

  bool use_parallel_marking_threads() const {
    assert(parallel_marking_threads() <=
           max_parallel_marking_threads(), "sanity");
    assert((_parallel_workers == NULL && parallel_marking_threads() == 0) ||
           parallel_marking_threads() > 0,
           "parallel workers not set up correctly");
    return _parallel_workers != NULL;
  }

  HeapWord*               finger()        { return _finger;   }
  bool                    concurrent()    { return _concurrent; }
  uint                    active_tasks()  { return _active_tasks; }
  ParallelTaskTerminator* terminator()    { return &_terminator; }

  // It claims the next available region to be scanned by a marking
  // task/thread. It might return NULL if the next region is empty or
  // we have run out of regions. In the latter case, out_of_regions()
  // determines whether we've really run out of regions or the task
  // should call claim_region() again. This might seem a bit
  // awkward. Originally, the code was written so that claim_region()
  // either successfully returned with a non-empty region or there
  // were no more regions to be claimed. The problem with this was
  // that, in certain circumstances, it iterated over large chunks of
  // the heap finding only empty regions and, while it was working, it
  // was preventing the calling task from calling its regular clock
  // method. So, this way, each task will spend very little time in
  // claim_region() and is allowed to call the regular clock method
  // frequently.
  HeapRegion* claim_region(uint worker_id);

  // It determines whether we've run out of regions to scan.
  bool out_of_regions() { return _finger == _heap_end; }
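
  // Illustrative sketch only (not part of this header): the calling pattern
  // the comment above describes. A task keeps asking for regions and treats
  // a NULL result as "retry or stop" depending on out_of_regions(), which
  // lets it call its regular clock method between attempts.
  //
  //   while (true) {
  //     HeapRegion* hr = claim_region(worker_id);
  //     if (hr != NULL) {
  //       // scan the claimed region
  //     } else if (out_of_regions()) {
  //       break;                    // really no regions left
  //     }
  //     // hr == NULL but not out of regions: the candidate was empty;
  //     // do the regular clock call and try again
  //   }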

  // Returns the task with the given id
  CMTask* task(int id) {
    assert(0 <= id && id < (int) _active_tasks,
           "task id not within active bounds");
    return _tasks[id];
  }

  // Returns the task queue with the given id
  CMTaskQueue* task_queue(int id) {
    assert(0 <= id && id < (int) _active_tasks,
           "task queue id not within active bounds");
    return (CMTaskQueue*) _task_queues->queue(id);
  }

  // Returns the task queue set
  CMTaskQueueSet* task_queues() { return _task_queues; }

  // Access / manipulation of the overflow flag which is set to
  // indicate that the global stack has overflown
  bool has_overflown()           { return _has_overflown; }
  void set_has_overflown()       { _has_overflown = true; }
  void clear_has_overflown()     { _has_overflown = false; }
  bool restart_for_overflow()    { return _restart_for_overflow; }

  bool has_aborted()             { return _has_aborted; }

  // Methods to enter the two overflow sync barriers
  void enter_first_sync_barrier(uint worker_id);
  void enter_second_sync_barrier(uint worker_id);

  ForceOverflowSettings* force_overflow_conc() {
    return &_force_overflow_conc;
  }

  ForceOverflowSettings* force_overflow_stw() {
    return &_force_overflow_stw;
  }

  ForceOverflowSettings* force_overflow() {
    if (concurrent()) {
      return force_overflow_conc();
    } else {
      return force_overflow_stw();
    }
  }

  // Live Data Counting data structures...
  // These data structures are initialized at the start of
  // marking. They are written to while marking is active.
  // They are aggregated during remark; the aggregated values
  // are then used to populate the _region_bm, _card_bm, and
  // the total live bytes, which are then subsequently updated
  // during cleanup.

  // An array of bitmaps (one bit map per task). Each bitmap
  // is used to record the cards spanned by the live objects
  // marked by that task/worker.
  BitMap*  _count_card_bitmaps;

  // Used to record the number of marked live bytes
  // (for each region, by worker thread).
  size_t** _count_marked_bytes;

  // Card index of the bottom of the G1 heap. Used for biasing indices into
  // the card bitmaps.
  intptr_t _heap_bottom_card_num;

  // Set to true when initialization is complete
  bool _completed_initialization;

 public:
  // Manipulation of the global mark stack.
  // Notice that the first mark_stack_push is CAS-based, whereas the
  // two below are Mutex-based. This is OK since the first one is only
  // called during evacuation pauses and doesn't compete with the
  // other two (which are called by the marking tasks during
  // concurrent marking or remark).
  bool mark_stack_push(oop p) {
    _markStack.par_push(p);
    if (_markStack.overflow()) {
      set_has_overflown();
      return false;
    }
    return true;
  }
  bool mark_stack_push(oop* arr, int n) {
    _markStack.par_push_arr(arr, n);
    if (_markStack.overflow()) {
      set_has_overflown();
      return false;
    }
    return true;
  }
  void mark_stack_pop(oop* arr, int max, int* n) {
    _markStack.par_pop_arr(arr, max, n);
  }
  size_t mark_stack_size()                { return _markStack.size(); }
  size_t partial_mark_stack_size_target() { return _markStack.maxElems()/3; }
  bool mark_stack_overflow()              { return _markStack.overflow(); }
  bool mark_stack_empty()                 { return _markStack.isEmpty(); }
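
  // Illustrative sketch only (not part of this header): how the overflow
  // flag and the two sync barriers declared above fit together, following
  // the comment on _first/_second_overflow_barrier_sync.
  //
  //   if (!mark_stack_push(obj)) {
  //     // Global stack overflowed: every task eventually notices
  //     // has_overflown() and aborts its marking step.
  //   }
  //   ...
  //   // On the restart path each task then does, roughly:
  //   //   enter_first_sync_barrier(worker_id);   // stop touching global data
  //   //   ... task 0 re-initialises the global data, the others their own ...
  //   //   enter_second_sync_barrier(worker_id);  // only now resume work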

  CMRootRegions* root_regions() { return &_root_regions; }

  bool concurrent_marking_in_progress() {
    return _concurrent_marking_in_progress;
  }
  void set_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = true;
  }
  void clear_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = false;
  }

  void update_accum_task_vtime(int i, double vtime) {
    _accum_task_vtime[i] += vtime;
  }

  double all_task_accum_vtime() {
    double ret = 0.0;
    for (uint i = 0; i < _max_worker_id; ++i) {
      ret += _accum_task_vtime[i];
    }
    return ret;
  }

  // Attempts to steal an object from the task queues of other tasks
  bool try_stealing(uint worker_id, int* hash_seed, oop& obj) {
    return _task_queues->steal(worker_id, hash_seed, obj);
  }

  ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs);
  ~ConcurrentMark();

  ConcurrentMarkThread* cmThread() { return _cmThread; }

  CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; }
  CMBitMap*   nextMarkBitMap() const { return _nextMarkBitMap; }

  // Returns the number of GC threads to be used in a concurrent
  // phase based on the number of GC threads being used in a STW
  // phase.
  uint scale_parallel_threads(uint n_par_threads);

  // Calculates the number of GC threads to be used in a concurrent phase.
  uint calc_parallel_marking_threads();

  // The following three are interactions between CM and
  // G1CollectedHeap

  // This notifies CM that a root during initial-mark needs to be
  // grayed. It is MT-safe. word_size is the size of the object in
  // words. It is passed explicitly as sometimes we cannot calculate
  // it from the given object because it might be in an inconsistent
  // state (e.g., in to-space and being copied). So the caller is
  // responsible for dealing with this issue (e.g., get the size from
  // the from-space image when the to-space image might be
  // inconsistent) and always passing the size. hr is the region that
  // contains the object and it's passed optionally from callers who
  // might already have it (no point in recalculating it).
  inline void grayRoot(oop obj, size_t word_size,
                       uint worker_id, HeapRegion* hr = NULL);

  // It iterates over the heap and for each object it comes across it
  // will dump the contents of its reference fields, as well as
  // liveness information for the object and its referents. The dump
  // will be written to a file with the following name:
  // G1PrintReachableBaseFile + "." + str.
  // vo decides whether the prev (vo == UsePrevMarking), the next
  // (vo == UseNextMarking) marking information, or the mark word
  // (vo == UseMarkWord) will be used to determine the liveness of
  // each object / referent.
  // If all is true, all objects in the heap will be dumped, otherwise
  // only the live ones. In the dump the following symbols / abbreviations
  // are used:
  //   M : an explicitly live object (its bitmap bit is set)
  //   > : an implicitly live object (over tams)
  //   O : an object outside the G1 heap (typically: in the perm gen)
  // NOT : a reference field whose referent is not live
  // AND MARKED : indicates that an object is both explicitly and
  // implicitly live (it should be one or the other, not both)
  void print_reachable(const char* str,
                       VerifyOption vo, bool all) PRODUCT_RETURN;

  // Clear the next marking bitmap (will be called concurrently).
  void clearNextBitmap();

  // These two do the work that needs to be done before and after the
  // initial root checkpoint. Since this checkpoint can be done at two
  // different points (i.e. an explicit pause or piggy-backed on a
  // young collection), it's nice to be able to easily share the
  // pre/post code. It might be the case that we can put everything in
  // the post method. TP
  void checkpointRootsInitialPre();
  void checkpointRootsInitialPost();

  // Scan all the root regions and mark everything reachable from
  // them.
  void scanRootRegions();

  // Scan a single root region and mark everything reachable from it.
  void scanRootRegion(HeapRegion* hr, uint worker_id);

  // Do concurrent phase of marking, to a tentative transitive closure.
  void markFromRoots();

  void checkpointRootsFinal(bool clear_all_soft_refs);
  void checkpointRootsFinalWork();
  void cleanup();
  void completeCleanup();

  // Mark in the previous bitmap. NB: this is usually read-only, so use
  // this carefully!
  inline void markPrev(oop p);

  // Clears marks for all objects in the given range, for the prev,
  // next, or both bitmaps. NB: the previous bitmap is usually
  // read-only, so use this carefully!
  void clearRangePrevBitmap(MemRegion mr);
  void clearRangeNextBitmap(MemRegion mr);
  void clearRangeBothBitmaps(MemRegion mr);

  // Notify data structures that a GC has started.
  void note_start_of_gc() {
    _markStack.note_start_of_gc();
  }

  // Notify data structures that a GC is finished.
  void note_end_of_gc() {
    _markStack.note_end_of_gc();
  }

  // Verify that there are no CSet oops on the stacks (taskqueues /
  // global mark stack), enqueued SATB buffers, per-thread SATB
  // buffers, and fingers (global / per-task). The boolean parameters
  // decide which of the above data structures to verify. If marking
  // is not in progress, it's a no-op.
  void verify_no_cset_oops(bool verify_stacks,
                           bool verify_enqueued_buffers,
                           bool verify_thread_buffers,
                           bool verify_fingers) PRODUCT_RETURN;

  // It is called at the end of an evacuation pause during marking so
  // that CM is notified of where the new end of the heap is. It
  // doesn't do anything if concurrent_marking_in_progress() is false,
  // unless the force parameter is true.
  void update_g1_committed(bool force = false);

  bool isMarked(oop p) const {
    assert(p != NULL && p->is_oop(), "expected an oop");
    HeapWord* addr = (HeapWord*)p;
    assert(addr >= _nextMarkBitMap->startWord() &&
           addr <  _nextMarkBitMap->endWord(), "in a region");

    return _nextMarkBitMap->isMarked(addr);
  }

  inline bool not_yet_marked(oop p) const;

  // XXX Debug code
  bool containing_card_is_marked(void* p);
  bool containing_cards_are_marked(void* start, void* last);

  bool isPrevMarked(oop p) const {
    assert(p != NULL && p->is_oop(), "expected an oop");
    HeapWord* addr = (HeapWord*)p;
    assert(addr >= _prevMarkBitMap->startWord() &&
           addr <  _prevMarkBitMap->endWord(), "in a region");

    return _prevMarkBitMap->isMarked(addr);
  }

  inline bool do_yield_check(uint worker_i = 0);
  inline bool should_yield();

  // Called to abort the marking cycle after a Full GC takes place.
  void abort();

  // This prints the global/local fingers. It is used for debugging.
  NOT_PRODUCT(void print_finger();)

  void print_summary_info();

  void print_worker_threads_on(outputStream* st) const;

  // The following indicate whether a given verbose level has been
  // set. Notice that anything above stats is conditional on
  // _MARKING_VERBOSE_ having been set to 1
  bool verbose_stats() {
    return _verbose_level >= stats_verbose;
  }
  bool verbose_low() {
    return _MARKING_VERBOSE_ && _verbose_level >= low_verbose;
  }
  bool verbose_medium() {
    return _MARKING_VERBOSE_ && _verbose_level >= medium_verbose;
  }
  bool verbose_high() {
    return _MARKING_VERBOSE_ && _verbose_level >= high_verbose;
  }

  // Liveness counting

  // Utility routine to set an exclusive range of cards on the given
  // card liveness bitmap
  inline void set_card_bitmap_range(BitMap* card_bm,
                                    BitMap::idx_t start_idx,
                                    BitMap::idx_t end_idx,
                                    bool is_par);

  // Returns the card number of the bottom of the G1 heap.
  // Used in biasing indices into accounting card bitmaps.
  intptr_t heap_bottom_card_num() const {
    return _heap_bottom_card_num;
  }

  // Returns the card bitmap for a given task or worker id.
  BitMap* count_card_bitmap_for(uint worker_id) {
    assert(0 <= worker_id && worker_id < _max_worker_id, "oob");
    assert(_count_card_bitmaps != NULL, "uninitialized");
    BitMap* task_card_bm = &_count_card_bitmaps[worker_id];
    assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
    return task_card_bm;
  }

  // Returns the array containing the marked bytes for each region,
  // for the given worker or task id.
  size_t* count_marked_bytes_array_for(uint worker_id) {
    assert(0 <= worker_id && worker_id < _max_worker_id, "oob");
    assert(_count_marked_bytes != NULL, "uninitialized");
    size_t* marked_bytes_array = _count_marked_bytes[worker_id];
    assert(marked_bytes_array != NULL, "uninitialized");
    return marked_bytes_array;
  }

  // Returns the index in the liveness accounting card table bitmap
  // for the given address
  inline BitMap::idx_t card_bitmap_index_for(HeapWord* addr);

  // Counts the size of the given memory region in the given
  // marked_bytes array slot for the given HeapRegion.
  // Sets the bits in the given card bitmap that are associated with the
  // cards that are spanned by the memory region.
  inline void count_region(MemRegion mr, HeapRegion* hr,
                           size_t* marked_bytes_array,
                           BitMap* task_card_bm);

  // Counts the given memory region in the task/worker counting
  // data structures for the given worker id.
  inline void count_region(MemRegion mr, HeapRegion* hr, uint worker_id);

  // Counts the given memory region in the task/worker counting
  // data structures for the given worker id.
  inline void count_region(MemRegion mr, uint worker_id);

  // Counts the given object in the given task/worker counting
  // data structures.
  inline void count_object(oop obj, HeapRegion* hr,
                           size_t* marked_bytes_array,
                           BitMap* task_card_bm);

  // Counts the given object in the task/worker counting data
  // structures for the given worker id.
  inline void count_object(oop obj, HeapRegion* hr, uint worker_id);

  // Attempts to mark the given object and, if successful, counts
  // the object in the given task/worker counting structures.
  inline bool par_mark_and_count(oop obj, HeapRegion* hr,
                                 size_t* marked_bytes_array,
                                 BitMap* task_card_bm);

  // Attempts to mark the given object and, if successful, counts
  // the object in the task/worker counting structures for the
  // given worker id.
  inline bool par_mark_and_count(oop obj, size_t word_size,
                                 HeapRegion* hr, uint worker_id);

  // Attempts to mark the given object and, if successful, counts
  // the object in the task/worker counting structures for the
  // given worker id.
  inline bool par_mark_and_count(oop obj, HeapRegion* hr, uint worker_id);

  // Similar to the above routine but we don't know the heap region that
  // contains the object to be marked/counted, which this routine looks up.
  inline bool par_mark_and_count(oop obj, uint worker_id);

  // Similar to the above routine but there are times when we cannot
  // safely calculate the size of obj due to races and we, therefore,
  // pass the size in as a parameter. It is the caller's responsibility
  // to ensure that the size passed in for obj is valid.
  inline bool par_mark_and_count(oop obj, size_t word_size, uint worker_id);

  // Unconditionally mark the given object, and unconditionally count
  // the object in the counting structures for worker id 0.
  // Should *not* be called from parallel code.
  inline bool mark_and_count(oop obj, HeapRegion* hr);

  // Similar to the above routine but we don't know the heap region that
  // contains the object to be marked/counted, which this routine looks up.
  // Should *not* be called from parallel code.
  inline bool mark_and_count(oop obj);

  // Returns true if initialization was successfully completed.
  bool completed_initialization() const {
    return _completed_initialization;
  }

 protected:
  // Clear all the per-task bitmaps and arrays used to store the
  // counting data.
  void clear_all_count_data();

  // Aggregates the counting data for each worker/task
  // that was constructed while marking. Also sets
  // the amount of marked bytes for each region and
  // the top at concurrent mark count.
  void aggregate_count_data();

  // Verification routine
  void verify_count_data();
};
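
// Illustrative sketch only (not part of this header): how the liveness
// counting pieces above are meant to fit together for one worker. The local
// variable names are made up for the example.
//
//   ConcurrentMark* cm = ...;
//   size_t* marked_bytes = cm->count_marked_bytes_array_for(worker_id);
//   BitMap* card_bm      = cm->count_card_bitmap_for(worker_id);
//
//   // While marking, each successfully marked object is also counted:
//   cm->par_mark_and_count(obj, hr, marked_bytes, card_bm);
//
//   // During remark, aggregate_count_data() folds the per-worker card
//   // bitmaps and byte counts into _region_bm / _card_bm and the regions'
//   // live byte totals, which cleanup then uses.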

// A class representing a marking task.
class CMTask : public TerminatorTerminator {
 private:
  enum PrivateConstants {
    // the regular clock call is called once the number of scanned words
    // reaches this limit
    words_scanned_period          = 12*1024,
    // the regular clock call is called once the number of visited
    // references reaches this limit
    refs_reached_period           = 384,
    // initial value for the hash seed, used in the work stealing code
    init_hash_seed                = 17,
    // how many entries will be transferred between global stack and
    // local queues
    global_stack_transfer_size    = 16
  };

  uint                        _worker_id;
  G1CollectedHeap*            _g1h;
  ConcurrentMark*             _cm;
  CMBitMap*                   _nextMarkBitMap;
  // the task queue of this task
  CMTaskQueue*                _task_queue;
 private:
  // the task queue set---needed for stealing
  CMTaskQueueSet*             _task_queues;
  // indicates whether the task has been claimed---this is only for
  // debugging purposes
  bool                        _claimed;

  // number of calls to this task
  int                         _calls;

  // when the virtual timer reaches this time, the marking step should
  // exit
  double                      _time_target_ms;
  // the start time of the current marking step
  double                      _start_time_ms;

  // the oop closure used for iterations over oops
  G1CMOopClosure*             _cm_oop_closure;

  // the region this task is scanning, NULL if we're not scanning any
  HeapRegion*                 _curr_region;
  // the local finger of this task, NULL if we're not scanning a region
  HeapWord*                   _finger;
  // limit of the region this task is scanning, NULL if we're not scanning one
  HeapWord*                   _region_limit;

  // the number of words this task has scanned
  size_t                      _words_scanned;
  // When _words_scanned reaches this limit, the regular clock is
  // called. Notice that this might be decreased under certain
  // circumstances (i.e. when we believe that we did an expensive
  // operation).
  size_t                      _words_scanned_limit;
  // the initial value of _words_scanned_limit (i.e. what it was
  // before it was decreased).
  size_t                      _real_words_scanned_limit;

  // the number of references this task has visited
  size_t                      _refs_reached;
  // When _refs_reached reaches this limit, the regular clock is
  // called. Notice that this might be decreased under certain
  // circumstances (i.e. when we believe that we did an expensive
  // operation).
  size_t                      _refs_reached_limit;
  // the initial value of _refs_reached_limit (i.e. what it was before
  // it was decreased).
  size_t                      _real_refs_reached_limit;

  // used by the work stealing stuff
  int                         _hash_seed;
  // if this is true, then the task has aborted for some reason
  bool                        _has_aborted;
  // set when the task aborts because it has met its time quota
  bool                        _has_timed_out;
  // true when we're draining SATB buffers; this avoids the task
  // aborting due to SATB buffers being available (as we're already
  // dealing with them)
  bool                        _draining_satb_buffers;

  // number sequence of past step times
  NumberSeq                   _step_times_ms;
  // elapsed time of this task
  double                      _elapsed_time_ms;
  // termination time of this task
  double                      _termination_time_ms;
  // when this task got into the termination protocol
  double                      _termination_start_time_ms;

  // true when the task is running during a concurrent phase, false when it
  // is in the remark phase (so, in the latter case, we do not have to
  // check all the things that we have to check during the concurrent
  // phase, i.e. SATB buffer availability...)
  bool                        _concurrent;

  TruncatedSeq                _marking_step_diffs_ms;

  // Counting data structures. Embedding the task's marked_bytes_array
  // and card bitmap into the actual task saves having to go through
  // the ConcurrentMark object.
  size_t*                     _marked_bytes_array;
  BitMap*                     _card_bm;

  // LOTS of statistics related to this task
#if _MARKING_STATS_
  NumberSeq                   _all_clock_intervals_ms;
  double                      _interval_start_time_ms;

  int                         _aborted;
  int                         _aborted_overflow;
  int                         _aborted_cm_aborted;
  int                         _aborted_yield;
  int                         _aborted_timed_out;
  int                         _aborted_satb;
  int                         _aborted_termination;

  int                         _steal_attempts;
  int                         _steals;

  int                         _clock_due_to_marking;
  int                         _clock_due_to_scanning;

  int                         _local_pushes;
  int                         _local_pops;
  int                         _local_max_size;
  int                         _objs_scanned;

  int                         _global_pushes;
  int                         _global_pops;
  int                         _global_max_size;

  int                         _global_transfers_to;
  int                         _global_transfers_from;

  int                         _regions_claimed;
  int                         _objs_found_on_bitmap;

  int                         _satb_buffers_processed;
#endif // _MARKING_STATS_

  // it updates the local fields after this task has claimed
  // a new region to scan
  void setup_for_region(HeapRegion* hr);
  // it brings the limit of the region up to date
  void update_region_limit();

  // called when either the words scanned or the refs visited limit
  // has been reached
  void reached_limit();
  // recalculates the words scanned and refs visited limits
  void recalculate_limits();
  // decreases the words scanned and refs visited limits when we reach
  // an expensive operation
  void decrease_limits();
  // it checks whether the words scanned or refs visited reached their
  // respective limit and calls reached_limit() if they have
  void check_limits() {
    if (_words_scanned >= _words_scanned_limit ||
        _refs_reached >= _refs_reached_limit) {
      reached_limit();
    }
  }
  // this is supposed to be called regularly during a marking step as
  // it checks a bunch of conditions that might cause the marking step
  // to abort
  void regular_clock_call();
  bool concurrent() { return _concurrent; }

 public:
  // It resets the task; it should be called right at the beginning of
  // a marking phase.
  void reset(CMBitMap* _nextMarkBitMap);
  // it clears all the fields that correspond to a claimed region.
  void clear_region_fields();

  void set_concurrent(bool concurrent) { _concurrent = concurrent; }

  // The main method of this class which performs a marking step
  // trying not to exceed the given duration. However, it might exit
  // prematurely, according to some conditions (e.g. SATB buffers are
  // available for processing).
  void do_marking_step(double target_ms, bool do_stealing, bool do_termination);

  // These two calls start and stop the timer
  void record_start_time() {
    _elapsed_time_ms = os::elapsedTime() * 1000.0;
  }
  void record_end_time() {
    _elapsed_time_ms = os::elapsedTime() * 1000.0 - _elapsed_time_ms;
  }

  // returns the worker ID associated with this task.
  uint worker_id() { return _worker_id; }

  // From TerminatorTerminator. It determines whether this task should
  // exit the termination protocol after it's entered it.
  virtual bool should_exit_termination();

  // Resets the local region fields after a task has finished scanning a
  // region; or when they have become stale as a result of the region
  // being evacuated.
  void giveup_current_region();

  HeapWord* finger()            { return _finger; }

  bool has_aborted()            { return _has_aborted; }
  void set_has_aborted()        { _has_aborted = true; }
  void clear_has_aborted()      { _has_aborted = false; }
  bool has_timed_out()          { return _has_timed_out; }
  bool claimed()                { return _claimed; }

  void set_cm_oop_closure(G1CMOopClosure* cm_oop_closure);

  // It grays the object by marking it and, if necessary, pushing it
  // on the local queue
  inline void deal_with_reference(oop obj);

  // It scans an object and visits its children.
  void scan_object(oop obj);

  // It pushes an object on the local queue.
  inline void push(oop obj);

  // These two move entries to/from the global stack.
  void move_entries_to_global_stack();
  void get_entries_from_global_stack();

  // It pops and scans objects from the local queue. If partially is
  // true, then it stops when the queue size drops to a given limit. If
  // partially is false, then it stops when the queue is empty.
  void drain_local_queue(bool partially);
  // It moves entries from the global stack to the local queue and
  // drains the local queue. If partially is true, then it stops when
  // both the global stack and the local queue reach a given size. If
  // partially is false, it tries to empty them totally.
  void drain_global_stack(bool partially);
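
  // Illustrative sketch only (not part of this header): the typical order
  // in which a marking step uses the routines above, pieced together from
  // their comments (the real sequencing lives in do_marking_step()).
  //
  //   deal_with_reference(obj);     // mark and, if needed, push locally
  //   drain_local_queue(true);      // partially drain the local queue
  //   drain_global_stack(true);     // exchange entries with the global
  //                                 // stack, then drain again
  //   ...
  //   drain_local_queue(false);     // towards the end, empty both
  //   drain_global_stack(false);    // completely before termination
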
  // It keeps picking SATB buffers and processing them until no SATB
  // buffers are available.
  void drain_satb_buffers();

  // moves the local finger to a new location
  inline void move_finger_to(HeapWord* new_finger) {
    assert(new_finger >= _finger && new_finger < _region_limit, "invariant");
    _finger = new_finger;
  }

  CMTask(uint worker_id, ConcurrentMark* cm,
         size_t* marked_bytes, BitMap* card_bm,
         CMTaskQueue* task_queue, CMTaskQueueSet* task_queues);

  // it prints statistics associated with this task
  void print_stats();

#if _MARKING_STATS_
  void increase_objs_found_on_bitmap() { ++_objs_found_on_bitmap; }
#endif // _MARKING_STATS_
};

// Class that's used to print out per-region liveness
// information. It's currently used at the end of marking and also
// after we sort the old regions at the end of the cleanup operation.
class G1PrintRegionLivenessInfoClosure: public HeapRegionClosure {
 private:
  outputStream* _out;

  // Accumulators for these values.
  size_t _total_used_bytes;
  size_t _total_capacity_bytes;
  size_t _total_prev_live_bytes;
  size_t _total_next_live_bytes;

  // These are set up when we come across a "starts humongous" region
  // (as this is where most of this information is stored, not in the
  // subsequent "continues humongous" regions). After that, for every
  // region in a given humongous region series we deduce the right
  // values for it by simply subtracting the appropriate amount from
  // these fields. All these values should reach 0 after we've visited
  // the last region in the series.
  size_t _hum_used_bytes;
  size_t _hum_capacity_bytes;
  size_t _hum_prev_live_bytes;
  size_t _hum_next_live_bytes;

  static double perc(size_t val, size_t total) {
    if (total == 0) {
      return 0.0;
    } else {
      return 100.0 * ((double) val / (double) total);
    }
  }

  static double bytes_to_mb(size_t val) {
    return (double) val / (double) M;
  }

  // See the .cpp file.
  size_t get_hum_bytes(size_t* hum_bytes);
  void get_hum_bytes(size_t* used_bytes, size_t* capacity_bytes,
                     size_t* prev_live_bytes, size_t* next_live_bytes);

 public:
  // The header and footer are printed in the constructor and
  // destructor respectively.
  G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name);
  virtual bool doHeapRegion(HeapRegion* r);
  ~G1PrintRegionLivenessInfoClosure();
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP