/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP

#include "gc_implementation/g1/heapRegionSets.hpp"
#include "utilities/taskqueue.hpp"

class G1CollectedHeap;
class CMTask;
typedef GenericTaskQueue<oop, mtGC>            CMTaskQueue;
typedef GenericTaskQueueSet<CMTaskQueue, mtGC> CMTaskQueueSet;

// Closure used by CM during concurrent reference discovery
// and reference processing (during remarking) to determine
// if a particular object is alive. It is primarily used
// to determine if referents of discovered reference objects
// are alive. An instance is also embedded into the
// reference processor as the _is_alive_non_header field.
class G1CMIsAliveClosure: public BoolObjectClosure {
  G1CollectedHeap* _g1;
 public:
  G1CMIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) { }

  void do_object(oop obj) {
    ShouldNotCallThis();
  }
  bool do_object_b(oop obj);
};

// A generic CM bit map. This is essentially a wrapper around the BitMap
// class, with one bit per (1 << _shifter) HeapWords.

class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
 protected:
  HeapWord* _bmStartWord;      // base address of range covered by map
  size_t    _bmWordSize;       // map size (in #HeapWords covered)
  const int _shifter;          // map to char or bit
  VirtualSpace _virtual_space; // underlying the bit map
  BitMap    _bm;               // the bit map itself

 public:
  // constructor
  CMBitMapRO(ReservedSpace rs, int shifter);

  enum { do_yield = true };

  // inquiries
  HeapWord* startWord()   const { return _bmStartWord; }
  size_t    sizeInWords() const { return _bmWordSize;  }
  // the following is one past the last word in space
  HeapWord* endWord()     const { return _bmStartWord + _bmWordSize; }

  // read marks

  bool isMarked(HeapWord* addr) const {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    return _bm.at(heapWordToOffset(addr));
  }

  // iteration
  inline bool iterate(BitMapClosure* cl, MemRegion mr);
  inline bool iterate(BitMapClosure* cl);

  // Return the address corresponding to the next marked bit at or after
  // "addr", and before "limit", if "limit" is non-NULL. If there is no
  // such bit, returns "limit" if that is non-NULL, or else "endWord()".
  HeapWord* getNextMarkedWordAddress(HeapWord* addr,
                                     HeapWord* limit = NULL) const;
  // Return the address corresponding to the next unmarked bit at or after
  // "addr", and before "limit", if "limit" is non-NULL. If there is no
  // such bit, returns "limit" if that is non-NULL, or else "endWord()".
  HeapWord* getNextUnmarkedWordAddress(HeapWord* addr,
                                       HeapWord* limit = NULL) const;

  // conversion utilities
  // XXX Fix these so that offsets are size_t's...
  HeapWord* offsetToHeapWord(size_t offset) const {
    return _bmStartWord + (offset << _shifter);
  }
  size_t heapWordToOffset(HeapWord* addr) const {
    return pointer_delta(addr, _bmStartWord) >> _shifter;
  }
  int heapWordDiffToOffsetDiff(size_t diff) const;
  HeapWord* nextWord(HeapWord* addr) {
    return offsetToHeapWord(heapWordToOffset(addr) + 1);
  }

  // debugging
  NOT_PRODUCT(bool covers(ReservedSpace rs) const;)
};
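
// Illustrative sketch (not part of the original header): walking every
// marked address in a CMBitMapRO via getNextMarkedWordAddress(); the
// names "bm" and "process" are hypothetical.
//
//   HeapWord* cur = bm->getNextMarkedWordAddress(bm->startWord());
//   while (cur < bm->endWord()) {
//     process(oop(cur));                           // an object starts here
//     cur = bm->getNextMarkedWordAddress(cur + 1); // advance past it
//   }
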
class CMBitMap : public CMBitMapRO {

 public:
  // constructor
  CMBitMap(ReservedSpace rs, int shifter) :
    CMBitMapRO(rs, shifter) {}

  // write marks
  void mark(HeapWord* addr) {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    _bm.set_bit(heapWordToOffset(addr));
  }
  void clear(HeapWord* addr) {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    _bm.clear_bit(heapWordToOffset(addr));
  }
  bool parMark(HeapWord* addr) {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    return _bm.par_set_bit(heapWordToOffset(addr));
  }
  bool parClear(HeapWord* addr) {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    return _bm.par_clear_bit(heapWordToOffset(addr));
  }
  void markRange(MemRegion mr);
  void clearAll();
  void clearRange(MemRegion mr);

  // Starting at the bit corresponding to "addr" (inclusive), find the next
  // "1" bit, if any. This bit starts some run of consecutive "1"'s; find
  // the end of this run (stopping at "end_addr"). Return the MemRegion
  // covering from the start of the region corresponding to the first bit
  // of the run to the end of the region corresponding to the last bit of
  // the run. If there is no "1" bit at or after "addr", return an empty
  // MemRegion.
  MemRegion getAndClearMarkedRegion(HeapWord* addr, HeapWord* end_addr);
};
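
// Illustrative sketch: parMark() returns true only for the thread that
// actually sets the bit, which is how parallel markers decide which
// worker "owns" a freshly marked object (hypothetical names):
//
//   if (next_bitmap->parMark((HeapWord*) obj)) {
//     task->push(obj);  // we won the race; only this worker pushes it
//   }
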
// Represents a marking stack used by the CM collector.
// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
class CMMarkStack VALUE_OBJ_CLASS_SPEC {
  ConcurrentMark* _cm;
  oop*   _base;        // bottom of stack
  jint   _index;       // one more than last occupied index
  jint   _capacity;    // max #elements
  jint   _saved_index; // value of _index saved at start of GC
  NOT_PRODUCT(jint _max_depth;)  // max depth plumbed during run

  bool   _overflow;
  DEBUG_ONLY(bool _drain_in_progress;)
  DEBUG_ONLY(bool _drain_in_progress_yields;)

 public:
  CMMarkStack(ConcurrentMark* cm);
  ~CMMarkStack();

  void allocate(size_t size);

  oop pop() {
    if (!isEmpty()) {
      return _base[--_index];
    }
    return NULL;
  }

  // If overflow happens, don't do the push, and record the overflow.
  // *Requires* that "ptr" is already marked.
  void push(oop ptr) {
    if (isFull()) {
      // Record overflow.
      _overflow = true;
      return;
    } else {
      _base[_index++] = ptr;
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
    }
  }
  // Non-blocking implementation. Note: concurrency is allowed only with
  // other "par_push" operations, not with "pop" or "drain". We would need
  // parallel versions of them if such concurrency was desired.
  void par_push(oop ptr);

  // Pushes the first "n" elements of "ptr_arr" on the stack.
  // Non-blocking implementation. Note: concurrency is allowed only with
  // other "par_adjoin_arr" or "push" operations, not with "pop" or "drain".
  void par_adjoin_arr(oop* ptr_arr, int n);

  // Pushes the first "n" elements of "ptr_arr" on the stack.
  // Locking implementation: concurrency is allowed only with
  // "par_push_arr" and/or "par_pop_arr" operations, which use the same
  // locking strategy.
  void par_push_arr(oop* ptr_arr, int n);

  // Returns false if the stack was empty. Otherwise, removes up to "max"
  // elements from the stack, and transfers them to "ptr_arr" in an
  // unspecified order. The actual number transferred is given in "n" ("n
  // == 0" is deliberately redundant with the return value.) Locking
  // implementation: concurrency is allowed only with "par_push_arr"
  // and/or "par_pop_arr" operations, which use the same locking strategy.
  bool par_pop_arr(oop* ptr_arr, int max, int* n);

  // Drain the mark stack, applying the given closure to all fields of
  // objects on the stack. (That is, continue until the stack is empty,
  // even if closure applications add entries to the stack.) The "bm"
  // argument, if non-null, may be used to verify that only marked objects
  // are on the mark stack. If "yield_after" is "true", then the
  // concurrent marker performing the drain offers to yield after
  // processing each object. If a yield occurs, stops the drain operation
  // and returns false. Otherwise, returns true.
  template<class OopClosureClass>
  bool drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after = false);

  bool isEmpty()  { return _index == 0; }
  bool isFull()   { return _index == _capacity; }
  int  maxElems() { return _capacity; }

  bool overflow() { return _overflow; }
  void clear_overflow() { _overflow = false; }

  int size() { return _index; }

  void setEmpty() { _index = 0; clear_overflow(); }

  // Record the current index.
  void note_start_of_gc();

  // Make sure that we have not added any entries to the stack during GC.
  void note_end_of_gc();

  // Iterate over the oops in the mark stack, up to the bound recorded
  // via the call above.
  void oops_do(OopClosure* f);
};
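
// Illustrative sketch of how callers react to overflow: the push is
// dropped, a global flag is raised, and marking later restarts from
// scratch (hypothetical names; compare ConcurrentMark::mark_stack_push()
// below):
//
//   stack->par_push(obj);
//   if (stack->overflow()) {
//     cm->set_has_overflown();  // every task will notice and abort
//   }
//   ...
//   stack->setEmpty();          // during the reset, before restarting
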
class ForceOverflowSettings VALUE_OBJ_CLASS_SPEC {
 private:
#ifndef PRODUCT
  uintx _num_remaining;
  bool  _force;
#endif // !defined(PRODUCT)

 public:
  void init() PRODUCT_RETURN;
  void update() PRODUCT_RETURN;
  bool should_force() PRODUCT_RETURN_( return false; );
};

// this will enable a variety of different statistics per GC task
#define _MARKING_STATS_ 0
// this will enable the higher verbose levels
#define _MARKING_VERBOSE_ 0

#if _MARKING_STATS_
#define statsOnly(statement)  \
do {                          \
  statement ;                 \
} while (0)
#else // _MARKING_STATS_
#define statsOnly(statement)  \
do {                          \
} while (0)
#endif // _MARKING_STATS_

typedef enum {
  no_verbose  = 0,   // verbose turned off
  stats_verbose,     // only prints stats at the end of marking
  low_verbose,       // low verbose, mostly per region and per major event
  medium_verbose,    // a bit more detailed than low
  high_verbose       // per object verbose
} CMVerboseLevel;

class YoungList;

// Root Regions are regions that are not empty at the beginning of a
// marking cycle and which we might collect during an evacuation pause
// while the cycle is active. Given that, during evacuation pauses, we
// do not copy objects that are explicitly marked, what we have to do
// for the root regions is to scan them and mark all objects reachable
// from them. According to the SATB assumptions, we only need to visit
// each object once during marking. So, as long as we finish this scan
// before the next evacuation pause, we can copy the objects from the
// root regions without having to mark them or do anything else to them.
//
// Currently, we only support root region scanning once (at the start
// of the marking cycle) and the root regions are all the survivor
// regions populated during the initial-mark pause.
class CMRootRegions VALUE_OBJ_CLASS_SPEC {
 private:
  YoungList*           _young_list;
  ConcurrentMark*      _cm;

  volatile bool        _scan_in_progress;
  volatile bool        _should_abort;
  HeapRegion* volatile _next_survivor;

 public:
  CMRootRegions();
  // We actually do most of the initialization in this method.
  void init(G1CollectedHeap* g1h, ConcurrentMark* cm);

  // Reset the claiming / scanning of the root regions.
  void prepare_for_scan();

  // Forces get_next() to return NULL so that the iteration aborts early.
  void abort() { _should_abort = true; }

  // Returns true if CM threads are actively scanning root regions,
  // false otherwise.
  bool scan_in_progress() { return _scan_in_progress; }

  // Atomically claim the next root region to scan, or return NULL if
  // all have been claimed.
  HeapRegion* claim_next();

  // Flag that we're done with root region scanning and notify anyone
  // who's waiting on it. If aborted is false, assume that all regions
  // have been claimed.
  void scan_finished();

  // If CM threads are still scanning root regions, wait until they
  // are done. Returns true if we had to wait, false otherwise.
  bool wait_until_scan_finished();
};
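
// Illustrative sketch of the claim/scan protocol above (hypothetical
// worker code; scanRootRegion() is declared on ConcurrentMark below):
//
//   CMRootRegions* root_regions = cm->root_regions();
//   HeapRegion* hr = root_regions->claim_next();
//   while (hr != NULL) {
//     cm->scanRootRegion(hr, worker_id);
//     hr = root_regions->claim_next();
//   }
//   // once every worker has returned, scanning is flagged as finished:
//   root_regions->scan_finished();
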
class ConcurrentMarkThread;

class ConcurrentMark: public CHeapObj<mtGC> {
  friend class ConcurrentMarkThread;
  friend class CMTask;
  friend class CMBitMapClosure;
  friend class CMGlobalObjectClosure;
  friend class CMRemarkTask;
  friend class CMConcurrentMarkingTask;
  friend class G1ParNoteEndTask;
  friend class CalcLiveObjectsClosure;
  friend class G1CMRefProcTaskProxy;
  friend class G1CMRefProcTaskExecutor;
  friend class G1CMParKeepAliveAndDrainClosure;
  friend class G1CMParDrainMarkingStackClosure;

 protected:
  ConcurrentMarkThread* _cmThread; // the thread doing the work
  G1CollectedHeap*      _g1h;      // the heap
  uint                  _parallel_marking_threads; // the number of marking
                                                   // threads we're using
  uint                  _max_parallel_marking_threads; // max number of marking
                                                       // threads we'll ever use
  double                _sleep_factor; // how much we have to sleep, with
                                       // respect to the work we just did, to
                                       // meet the marking overhead goal
  double                _marking_task_overhead; // marking target overhead for
                                                // a single task

  // same as the two above, but for the cleanup task
  double                _cleanup_sleep_factor;
  double                _cleanup_task_overhead;

  FreeRegionList        _cleanup_list;

  // Concurrent marking support structures
  CMBitMap                _markBitMap1;
  CMBitMap                _markBitMap2;
  CMBitMapRO*             _prevMarkBitMap; // completed mark bitmap
  CMBitMap*               _nextMarkBitMap; // under-construction mark bitmap

  BitMap                  _region_bm;
  BitMap                  _card_bm;

  // Heap bounds
  HeapWord*               _heap_start;
  HeapWord*               _heap_end;

  // Root region tracking and claiming.
  CMRootRegions           _root_regions;

  // For gray objects
  CMMarkStack             _markStack; // Grey objects behind global finger.
  HeapWord* volatile      _finger;    // the global finger, region aligned,
                                      // always points to the end of the
                                      // last claimed region

  // marking tasks
  uint                    _max_task_num; // maximum task number
  uint                    _active_tasks; // number of currently active tasks
  CMTask**                _tasks;        // task queue array (max_task_num len)
  CMTaskQueueSet*         _task_queues;  // task queue set
  ParallelTaskTerminator  _terminator;   // for termination

  // Two sync barriers that are used to synchronise tasks when an
  // overflow occurs. The algorithm is the following. All tasks enter
  // the first one to ensure that they have all stopped manipulating
  // the global data structures. After they exit it, they re-initialise
  // their data structures and task 0 re-initialises the global data
  // structures. Then, they enter the second sync barrier. This
  // ensures that no task starts doing work before all data
  // structures (local and global) have been re-initialised. When they
  // exit it, they are free to start working again.
  WorkGangBarrierSync     _first_overflow_barrier_sync;
  WorkGangBarrierSync     _second_overflow_barrier_sync;
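
  // Illustrative sketch of the protocol described above (hypothetical
  // task code; in practice it runs inside the marking step):
  //
  //   _cm->enter_first_sync_barrier(_task_id);   // everyone stops mutating
  //   // ... each task re-initialises its local structures; task 0 also
  //   // re-initialises the global ones ...
  //   _cm->enter_second_sync_barrier(_task_id);  // nobody restarts early
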
  // this is set by any task when an overflow on the global data
  // structures is detected.
  volatile bool           _has_overflown;
  // true: marking is concurrent, false: we're in remark
  volatile bool           _concurrent;
  // set at the end of a Full GC so that marking aborts
  volatile bool           _has_aborted;

  // used when remark aborts due to an overflow to indicate that
  // another concurrent marking phase should start
  volatile bool           _restart_for_overflow;

  // This is true from the very start of concurrent marking until the
  // point when all the tasks complete their work. It is really used
  // to determine the interval between the end of concurrent marking
  // and the remark pause.
  volatile bool           _concurrent_marking_in_progress;

  // verbose level
  CMVerboseLevel          _verbose_level;

  // All of these times are in ms.
  NumberSeq _init_times;
  NumberSeq _remark_times;
  NumberSeq _remark_mark_times;
  NumberSeq _remark_weak_ref_times;
  NumberSeq _cleanup_times;
  double    _total_counting_time;
  double    _total_rs_scrub_time;

  double*   _accum_task_vtime; // accumulated task vtime

  FlexibleWorkGang* _parallel_workers;

  ForceOverflowSettings _force_overflow_conc;
  ForceOverflowSettings _force_overflow_stw;

  void weakRefsWork(bool clear_all_soft_refs);

  void swapMarkBitMaps();

  // It resets the global marking data structures, as well as the
  // task local ones; should be called during initial mark.
  void reset();

  // Resets all the marking data structures. Called when we have to restart
  // marking or when marking completes (via set_non_marking_state below).
  void reset_marking_state(bool clear_overflow = true);

  // We do this after we're done with marking so that the marking data
  // structures are initialised to a sensible and predictable state.
  void set_non_marking_state();

  // It should be called to indicate which phase we're in (concurrent
  // mark or remark) and how many threads are currently active.
  void set_phase(uint active_tasks, bool concurrent);

  // prints all gathered CM-related statistics
  void print_stats();

  bool cleanup_list_is_empty() {
    return _cleanup_list.is_empty();
  }

  // accessor methods
  uint parallel_marking_threads()     { return _parallel_marking_threads; }
  uint max_parallel_marking_threads() { return _max_parallel_marking_threads; }
  double sleep_factor()               { return _sleep_factor; }
  double marking_task_overhead()      { return _marking_task_overhead; }
  double cleanup_sleep_factor()       { return _cleanup_sleep_factor; }
  double cleanup_task_overhead()      { return _cleanup_task_overhead; }

  HeapWord*               finger()       { return _finger; }
  bool                    concurrent()   { return _concurrent; }
  uint                    active_tasks() { return _active_tasks; }
  ParallelTaskTerminator* terminator()   { return &_terminator; }

  // It claims the next available region to be scanned by a marking
  // task. It might return NULL if the next region is empty or we have
  // run out of regions. In the latter case, out_of_regions()
  // determines whether we've really run out of regions or the task
  // should call claim_region() again. This might seem a bit
  // awkward. Originally, the code was written so that claim_region()
  // either successfully returned with a non-empty region or there
  // were no more regions to be claimed. The problem with this was
  // that, in certain circumstances, it iterated over large chunks of
  // the heap finding only empty regions and, while it was working, it
  // was preventing the calling task from calling its regular clock
  // method. So, this way, each task will spend very little time in
  // claim_region() and is allowed to call the regular clock method
  // frequently.
  HeapRegion* claim_region(int task);

  // It determines whether we've run out of regions to scan.
  bool        out_of_regions() { return _finger == _heap_end; }

  // Returns the task with the given id
  CMTask* task(int id) {
    assert(0 <= id && id < (int) _active_tasks,
           "task id not within active bounds");
    return _tasks[id];
  }

  // Returns the task queue with the given id
  CMTaskQueue* task_queue(int id) {
    assert(0 <= id && id < (int) _active_tasks,
           "task queue id not within active bounds");
    return (CMTaskQueue*) _task_queues->queue(id);
  }

  // Returns the task queue set
  CMTaskQueueSet* task_queues() { return _task_queues; }

  // Access / manipulation of the overflow flag which is set to
  // indicate that the global stack has overflown
  bool has_overflown()        { return _has_overflown; }
  void set_has_overflown()    { _has_overflown = true; }
  void clear_has_overflown()  { _has_overflown = false; }
  bool restart_for_overflow() { return _restart_for_overflow; }

  bool has_aborted()          { return _has_aborted; }

  // Methods to enter the two overflow sync barriers
  void enter_first_sync_barrier(int task_num);
  void enter_second_sync_barrier(int task_num);

  ForceOverflowSettings* force_overflow_conc() {
    return &_force_overflow_conc;
  }

  ForceOverflowSettings* force_overflow_stw() {
    return &_force_overflow_stw;
  }

  ForceOverflowSettings* force_overflow() {
    if (concurrent()) {
      return force_overflow_conc();
    } else {
      return force_overflow_stw();
    }
  }

  // Live Data Counting data structures...
  // These data structures are initialized at the start of
  // marking. They are written to while marking is active.
  // They are aggregated during remark; the aggregated values
  // are then used to populate the _region_bm, _card_bm, and
  // the total live bytes, which are then subsequently updated
  // during cleanup.

  // An array of bitmaps (one bit map per task). Each bitmap
  // is used to record the cards spanned by the live objects
  // marked by that task/worker.
  BitMap*  _count_card_bitmaps;

  // Used to record the number of marked live bytes
  // (for each region, by worker thread).
  size_t** _count_marked_bytes;

  // Card index of the bottom of the G1 heap. Used for biasing indices into
  // the card bitmaps.
  intptr_t _heap_bottom_card_num;
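
  // Illustrative sketch of the biasing (an assumption for illustration:
  // cards are 2^CardTableModRefBS::card_shift bytes; the real mapping is
  // card_bitmap_index_for(), declared below):
  //
  //   idx = (uintptr_t(addr) >> CardTableModRefBS::card_shift)
  //         - _heap_bottom_card_num;
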
 public:
  // Manipulation of the global mark stack.
  // Notice that the first mark_stack_push is CAS-based, whereas the
  // two below are Mutex-based. This is OK since the first one is only
  // called during evacuation pauses and doesn't compete with the
  // other two (which are called by the marking tasks during
  // concurrent marking or remark).
  bool mark_stack_push(oop p) {
    _markStack.par_push(p);
    if (_markStack.overflow()) {
      set_has_overflown();
      return false;
    }
    return true;
  }
  bool mark_stack_push(oop* arr, int n) {
    _markStack.par_push_arr(arr, n);
    if (_markStack.overflow()) {
      set_has_overflown();
      return false;
    }
    return true;
  }
  void mark_stack_pop(oop* arr, int max, int* n) {
    _markStack.par_pop_arr(arr, max, n);
  }
  size_t mark_stack_size()                { return _markStack.size(); }
  size_t partial_mark_stack_size_target() { return _markStack.maxElems()/3; }
  bool mark_stack_overflow()              { return _markStack.overflow(); }
  bool mark_stack_empty()                 { return _markStack.isEmpty(); }

  CMRootRegions* root_regions() { return &_root_regions; }

  bool concurrent_marking_in_progress() {
    return _concurrent_marking_in_progress;
  }
  void set_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = true;
  }
  void clear_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = false;
  }

  void update_accum_task_vtime(int i, double vtime) {
    _accum_task_vtime[i] += vtime;
  }

  double all_task_accum_vtime() {
    double ret = 0.0;
    for (int i = 0; i < (int)_max_task_num; ++i)
      ret += _accum_task_vtime[i];
    return ret;
  }

  // Attempts to steal an object from the task queues of other tasks
  bool try_stealing(int task_num, int* hash_seed, oop& obj) {
    return _task_queues->steal(task_num, hash_seed, obj);
  }
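
  // Illustrative sketch of the steal/terminate loop a task enters once
  // its own queue is drained (hypothetical task code):
  //
  //   oop obj;
  //   while (!has_aborted()) {
  //     if (_cm->try_stealing(_task_id, &_hash_seed, obj)) {
  //       scan_object(obj);                  // process the stolen entry
  //     } else if (_cm->terminator()->offer_termination()) {
  //       break;                             // every queue is empty
  //     }
  //   }
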
  ConcurrentMark(ReservedSpace rs, uint max_regions);
  ~ConcurrentMark();

  ConcurrentMarkThread* cmThread() { return _cmThread; }

  CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; }
  CMBitMap*   nextMarkBitMap() const { return _nextMarkBitMap; }

  // Returns the number of GC threads to be used in a concurrent
  // phase based on the number of GC threads being used in a STW
  // phase.
  uint scale_parallel_threads(uint n_par_threads);

  // Calculates the number of GC threads to be used in a concurrent phase.
  uint calc_parallel_marking_threads();

  // The following three are interaction between CM and
  // G1CollectedHeap

  // This notifies CM that a root during initial-mark needs to be
  // grayed. It is MT-safe. word_size is the size of the object in
  // words. It is passed explicitly as sometimes we cannot calculate
  // it from the given object because it might be in an inconsistent
  // state (e.g., in to-space and being copied). So the caller is
  // responsible for dealing with this issue (e.g., get the size from
  // the from-space image when the to-space image might be
  // inconsistent) and always passing the size. hr is the region that
  // contains the object and it's passed optionally from callers who
  // might already have it (no point in recalculating it).
  inline void grayRoot(oop obj, size_t word_size,
                       uint worker_id, HeapRegion* hr = NULL);

  // Iterates over the heap and, for each object it comes across,
  // dumps the contents of its reference fields, as well as
  // liveness information for the object and its referents. The dump
  // will be written to a file with the following name:
  // G1PrintReachableBaseFile + "." + str.
  // vo decides whether the prev (vo == UsePrevMarking), the next
  // (vo == UseNextMarking) marking information, or the mark word
  // (vo == UseMarkWord) will be used to determine the liveness of
  // each object / referent.
  // If all is true, all objects in the heap will be dumped, otherwise
  // only the live ones. In the dump the following symbols / abbreviations
  // are used:
  //   M : an explicitly live object (its bitmap bit is set)
  //   > : an implicitly live object (over tams)
  //   O : an object outside the G1 heap (typically: in the perm gen)
  //   NOT : a reference field whose referent is not live
  //   AND MARKED : indicates that an object is both explicitly and
  //   implicitly live (it should be one or the other, not both)
  void print_reachable(const char* str,
                       VerifyOption vo, bool all) PRODUCT_RETURN;

  // Clear the next marking bitmap (will be called concurrently).
  void clearNextBitmap();

  // These two do the work that needs to be done before and after the
  // initial root checkpoint. Since this checkpoint can be done at two
  // different points (i.e. an explicit pause or piggy-backed on a
  // young collection), it's nice to be able to easily share the
  // pre/post code. It might be the case that we can put everything in
  // the post method. TP
  void checkpointRootsInitialPre();
  void checkpointRootsInitialPost();

  // Scan all the root regions and mark everything reachable from
  // them.
  void scanRootRegions();

  // Scan a single root region and mark everything reachable from it.
  void scanRootRegion(HeapRegion* hr, uint worker_id);

  // Do concurrent phase of marking, to a tentative transitive closure.
  void markFromRoots();

  void checkpointRootsFinal(bool clear_all_soft_refs);
  void checkpointRootsFinalWork();
  void cleanup();
  void completeCleanup();

  // Mark in the previous bitmap. NB: this is usually read-only, so use
  // this carefully!
  inline void markPrev(oop p);

  // Clears marks for all objects in the given range, for the prev,
  // next, or both bitmaps. NB: the previous bitmap is usually
  // read-only, so use this carefully!
  void clearRangePrevBitmap(MemRegion mr);
  void clearRangeNextBitmap(MemRegion mr);
  void clearRangeBothBitmaps(MemRegion mr);

  // Notify data structures that a GC has started.
  void note_start_of_gc() {
    _markStack.note_start_of_gc();
  }

  // Notify data structures that a GC is finished.
  void note_end_of_gc() {
    _markStack.note_end_of_gc();
  }

  // Verify that there are no CSet oops on the stacks (taskqueues /
  // global mark stack), enqueued SATB buffers, per-thread SATB
  // buffers, and fingers (global / per-task). The boolean parameters
  // decide which of the above data structures to verify. If marking
  // is not in progress, it's a no-op.
  void verify_no_cset_oops(bool verify_stacks,
                           bool verify_enqueued_buffers,
                           bool verify_thread_buffers,
                           bool verify_fingers) PRODUCT_RETURN;
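
  // Illustrative sketch of how an evacuation pause might bracket these
  // notifications (hypothetical caller code; the verification calls are
  // no-ops in product builds):
  //
  //   cm->note_start_of_gc();
  //   cm->verify_no_cset_oops(true, true, true, true);
  //   ... evacuate the collection set ...
  //   cm->verify_no_cset_oops(true, true, true, true);
  //   cm->note_end_of_gc();
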
  // It is called at the end of an evacuation pause during marking so
  // that CM is notified of where the new end of the heap is. It
  // doesn't do anything if concurrent_marking_in_progress() is false,
  // unless the force parameter is true.
  void update_g1_committed(bool force = false);

  bool isMarked(oop p) const {
    assert(p != NULL && p->is_oop(), "expected an oop");
    HeapWord* addr = (HeapWord*)p;
    assert(addr >= _nextMarkBitMap->startWord() &&
           addr < _nextMarkBitMap->endWord(), "in a region");

    return _nextMarkBitMap->isMarked(addr);
  }

  inline bool not_yet_marked(oop p) const;

  // XXX Debug code
  bool containing_card_is_marked(void* p);
  bool containing_cards_are_marked(void* start, void* last);

  bool isPrevMarked(oop p) const {
    assert(p != NULL && p->is_oop(), "expected an oop");
    HeapWord* addr = (HeapWord*)p;
    assert(addr >= _prevMarkBitMap->startWord() &&
           addr < _prevMarkBitMap->endWord(), "in a region");

    return _prevMarkBitMap->isMarked(addr);
  }

  inline bool do_yield_check(uint worker_i = 0);
  inline bool should_yield();

  // Called to abort the marking cycle after a Full GC takes place.
  void abort();

  // This prints the global/local fingers. It is used for debugging.
  NOT_PRODUCT(void print_finger();)

  void print_summary_info();

  void print_worker_threads_on(outputStream* st) const;

  // The following indicate whether a given verbose level has been
  // set. Notice that anything above stats is conditional on
  // _MARKING_VERBOSE_ having been set to 1
  bool verbose_stats() {
    return _verbose_level >= stats_verbose;
  }
  bool verbose_low() {
    return _MARKING_VERBOSE_ && _verbose_level >= low_verbose;
  }
  bool verbose_medium() {
    return _MARKING_VERBOSE_ && _verbose_level >= medium_verbose;
  }
  bool verbose_high() {
    return _MARKING_VERBOSE_ && _verbose_level >= high_verbose;
  }

  // Liveness counting

  // Utility routine to set an exclusive range of cards on the given
  // card liveness bitmap
  inline void set_card_bitmap_range(BitMap* card_bm,
                                    BitMap::idx_t start_idx,
                                    BitMap::idx_t end_idx,
                                    bool is_par);

  // Returns the card number of the bottom of the G1 heap.
  // Used in biasing indices into accounting card bitmaps.
  intptr_t heap_bottom_card_num() const {
    return _heap_bottom_card_num;
  }

  // Returns the card bitmap for a given task or worker id.
  BitMap* count_card_bitmap_for(uint worker_id) {
    assert(0 <= worker_id && worker_id < _max_task_num, "oob");
    assert(_count_card_bitmaps != NULL, "uninitialized");
    BitMap* task_card_bm = &_count_card_bitmaps[worker_id];
    assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
    return task_card_bm;
  }

  // Returns the array containing the marked bytes for each region,
  // for the given worker or task id.
  size_t* count_marked_bytes_array_for(uint worker_id) {
    assert(0 <= worker_id && worker_id < _max_task_num, "oob");
    assert(_count_marked_bytes != NULL, "uninitialized");
    size_t* marked_bytes_array = _count_marked_bytes[worker_id];
    assert(marked_bytes_array != NULL, "uninitialized");
    return marked_bytes_array;
  }

  // Returns the index in the liveness accounting card table bitmap
  // for the given address
  inline BitMap::idx_t card_bitmap_index_for(HeapWord* addr);
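
  // Illustrative sketch: a worker typically looks up its counting
  // structures once and passes them into the mark/count helpers declared
  // below (hypothetical code):
  //
  //   size_t* marked_bytes = count_marked_bytes_array_for(worker_id);
  //   BitMap* card_bm      = count_card_bitmap_for(worker_id);
  //   ...
  //   par_mark_and_count(obj, hr, marked_bytes, card_bm);
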
  // Counts the size of the given memory region in the given
  // marked_bytes array slot for the given HeapRegion.
  // Sets the bits in the given card bitmap that are associated with the
  // cards that are spanned by the memory region.
  inline void count_region(MemRegion mr, HeapRegion* hr,
                           size_t* marked_bytes_array,
                           BitMap* task_card_bm);

  // Counts the given memory region in the task/worker counting
  // data structures for the given worker id.
  inline void count_region(MemRegion mr, HeapRegion* hr, uint worker_id);

  // Counts the given memory region in the task/worker counting
  // data structures for the given worker id.
  inline void count_region(MemRegion mr, uint worker_id);

  // Counts the given object in the given task/worker counting
  // data structures.
  inline void count_object(oop obj, HeapRegion* hr,
                           size_t* marked_bytes_array,
                           BitMap* task_card_bm);

  // Counts the given object in the task/worker counting data
  // structures for the given worker id.
  inline void count_object(oop obj, HeapRegion* hr, uint worker_id);

  // Attempts to mark the given object and, if successful, counts
  // the object in the given task/worker counting structures.
  inline bool par_mark_and_count(oop obj, HeapRegion* hr,
                                 size_t* marked_bytes_array,
                                 BitMap* task_card_bm);

  // Attempts to mark the given object and, if successful, counts
  // the object in the task/worker counting structures for the
  // given worker id.
  inline bool par_mark_and_count(oop obj, size_t word_size,
                                 HeapRegion* hr, uint worker_id);

  // Attempts to mark the given object and, if successful, counts
  // the object in the task/worker counting structures for the
  // given worker id.
  inline bool par_mark_and_count(oop obj, HeapRegion* hr, uint worker_id);

  // Similar to the above routine but we don't know the heap region that
  // contains the object to be marked/counted, which this routine looks up.
  inline bool par_mark_and_count(oop obj, uint worker_id);

  // Similar to the above routine but there are times when we cannot
  // safely calculate the size of obj due to races and we, therefore,
  // pass the size in as a parameter. It is the caller's responsibility
  // to ensure that the size passed in for obj is valid.
  inline bool par_mark_and_count(oop obj, size_t word_size, uint worker_id);

  // Unconditionally mark the given object, and unconditionally count
  // the object in the counting structures for worker id 0.
  // Should *not* be called from parallel code.
  inline bool mark_and_count(oop obj, HeapRegion* hr);

  // Similar to the above routine but we don't know the heap region that
  // contains the object to be marked/counted, which this routine looks up.
  // Should *not* be called from parallel code.
  inline bool mark_and_count(oop obj);

 protected:
  // Clear all the per-task bitmaps and arrays used to store the
  // counting data.
  void clear_all_count_data();

  // Aggregates the counting data for each worker/task
  // that was constructed while marking. Also sets
  // the amount of marked bytes for each region and
  // the top at concurrent mark count.
  void aggregate_count_data();

  // Verification routine
  void verify_count_data();
};
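
// Illustrative sketch: the order in which a marking cycle exercises the
// ConcurrentMark API above (hypothetical driver code; the checkpoint
// steps actually run inside STW pauses, the rest concurrently):
//
//   cm->checkpointRootsInitialPre();   // initial-mark pause
//   cm->checkpointRootsInitialPost();
//   cm->scanRootRegions();             // concurrent
//   cm->markFromRoots();               // concurrent
//   cm->checkpointRootsFinal(false /* clear_all_soft_refs */); // remark
//   cm->cleanup();                     // cleanup pause
//   cm->completeCleanup();             // concurrent
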
// A class representing a marking task.
class CMTask : public TerminatorTerminator {
 private:
  enum PrivateConstants {
    // the regular clock call is called once the number of scanned
    // words reaches this limit
    words_scanned_period          = 12*1024,
    // the regular clock call is called once the number of visited
    // references reaches this limit
    refs_reached_period           = 384,
    // initial value for the hash seed, used in the work stealing code
    init_hash_seed                = 17,
    // how many entries will be transferred between global stack and
    // local queues
    global_stack_transfer_size    = 16
  };

  int                         _task_id;
  G1CollectedHeap*            _g1h;
  ConcurrentMark*             _cm;
  CMBitMap*                   _nextMarkBitMap;
  // the task queue of this task
  CMTaskQueue*                _task_queue;
 private:
  // the task queue set---needed for stealing
  CMTaskQueueSet*             _task_queues;
  // indicates whether the task has been claimed---this is only for
  // debugging purposes
  bool                        _claimed;

  // number of calls to this task
  int                         _calls;

  // when the virtual timer reaches this time, the marking step should
  // exit
  double                      _time_target_ms;
  // the start time of the current marking step
  double                      _start_time_ms;

  // the oop closure used for iterations over oops
  G1CMOopClosure*             _cm_oop_closure;

  // the region this task is scanning, NULL if we're not scanning any
  HeapRegion*                 _curr_region;
  // the local finger of this task, NULL if we're not scanning a region
  HeapWord*                   _finger;
  // limit of the region this task is scanning, NULL if we're not scanning one
  HeapWord*                   _region_limit;

  // the number of words this task has scanned
  size_t                      _words_scanned;
  // When _words_scanned reaches this limit, the regular clock is
  // called. Notice that this might be decreased under certain
  // circumstances (i.e. when we believe that we did an expensive
  // operation).
  size_t                      _words_scanned_limit;
  // the initial value of _words_scanned_limit (i.e. what it was
  // before it was decreased).
  size_t                      _real_words_scanned_limit;

  // the number of references this task has visited
  size_t                      _refs_reached;
  // When _refs_reached reaches this limit, the regular clock is
  // called. Notice that this might be decreased under certain
  // circumstances (i.e. when we believe that we did an expensive
  // operation).
  size_t                      _refs_reached_limit;
  // the initial value of _refs_reached_limit (i.e. what it was before
  // it was decreased).
  size_t                      _real_refs_reached_limit;

  // used by the work stealing code
  int                         _hash_seed;
  // if this is true, then the task has aborted for some reason
  bool                        _has_aborted;
  // set when the task aborts because it has met its time quota
  bool                        _has_timed_out;
  // true when we're draining SATB buffers; this avoids the task
  // aborting due to SATB buffers being available (as we're already
  // dealing with them)
  bool                        _draining_satb_buffers;

  // number sequence of past step times
  NumberSeq                   _step_times_ms;
  // elapsed time of this task
  double                      _elapsed_time_ms;
  // termination time of this task
  double                      _termination_time_ms;
  // when this task got into the termination protocol
  double                      _termination_start_time_ms;

  // true when the task is in a concurrent phase, false when it is
  // in the remark phase (so, in the latter case, we do not have to
  // check all the things that we have to check during the concurrent
  // phase, i.e. SATB buffer availability...)
  bool                        _concurrent;

  TruncatedSeq                _marking_step_diffs_ms;

  // Counting data structures. Embedding the task's marked_bytes_array
  // and card bitmap into the actual task saves having to go through
  // the ConcurrentMark object.
  size_t*                     _marked_bytes_array;
  BitMap*                     _card_bm;

  // LOTS of statistics related to this task
#if _MARKING_STATS_
  NumberSeq                   _all_clock_intervals_ms;
  double                      _interval_start_time_ms;

  int                         _aborted;
  int                         _aborted_overflow;
  int                         _aborted_cm_aborted;
  int                         _aborted_yield;
  int                         _aborted_timed_out;
  int                         _aborted_satb;
  int                         _aborted_termination;

  int                         _steal_attempts;
  int                         _steals;

  int                         _clock_due_to_marking;
  int                         _clock_due_to_scanning;

  int                         _local_pushes;
  int                         _local_pops;
  int                         _local_max_size;
  int                         _objs_scanned;

  int                         _global_pushes;
  int                         _global_pops;
  int                         _global_max_size;

  int                         _global_transfers_to;
  int                         _global_transfers_from;

  int                         _regions_claimed;
  int                         _objs_found_on_bitmap;

  int                         _satb_buffers_processed;
#endif // _MARKING_STATS_

  // it updates the local fields after this task has claimed
  // a new region to scan
  void setup_for_region(HeapRegion* hr);
  // it brings up-to-date the limit of the region
  void update_region_limit();

  // called when either the words scanned or the refs visited limit
  // has been reached
  void reached_limit();
  // recalculates the words scanned and refs visited limits
  void recalculate_limits();
  // decreases the words scanned and refs visited limits when we reach
  // an expensive operation
  void decrease_limits();
  // checks whether the words scanned or refs visited reached their
  // respective limit and calls reached_limit() if they have
  void check_limits() {
    if (_words_scanned >= _words_scanned_limit ||
        _refs_reached >= _refs_reached_limit) {
      reached_limit();
    }
  }
  // this is supposed to be called regularly during a marking step as
  // it checks a bunch of conditions that might cause the marking step
  // to abort
  void regular_clock_call();
  bool concurrent() { return _concurrent; }

 public:
  // It resets the task; it should be called right at the beginning of
  // a marking phase.
  void reset(CMBitMap* _nextMarkBitMap);
  // it clears all the fields that correspond to a claimed region.
  void clear_region_fields();

  void set_concurrent(bool concurrent) { _concurrent = concurrent; }

  // The main method of this class which performs a marking step
  // trying not to exceed the given duration. However, it might exit
  // prematurely, according to some conditions (i.e. SATB buffers are
  // available for processing).
  void do_marking_step(double target_ms, bool do_stealing, bool do_termination);

  // These two calls start and stop the timer
  void record_start_time() {
    _elapsed_time_ms = os::elapsedTime() * 1000.0;
  }
  void record_end_time() {
    _elapsed_time_ms = os::elapsedTime() * 1000.0 - _elapsed_time_ms;
  }

  // returns the task ID
  int task_id() { return _task_id; }

  // From TerminatorTerminator. It determines whether this task should
  // exit the termination protocol after it's entered it.
  virtual bool should_exit_termination();

  // Resets the local region fields after a task has finished scanning a
  // region; or when they have become stale as a result of the region
  // being evacuated.
  void giveup_current_region();

  HeapWord* finger()       { return _finger; }

  bool has_aborted()       { return _has_aborted; }
  void set_has_aborted()   { _has_aborted = true; }
  void clear_has_aborted() { _has_aborted = false; }
  bool has_timed_out()     { return _has_timed_out; }
  bool claimed()           { return _claimed; }

  void set_cm_oop_closure(G1CMOopClosure* cm_oop_closure);

  // It grays the object by marking it and, if necessary, pushing it
  // on the local queue
  inline void deal_with_reference(oop obj);

  // It scans an object and visits its children.
  void scan_object(oop obj);

  // It pushes an object on the local queue.
  inline void push(oop obj);

  // These two move entries to/from the global stack.
  void move_entries_to_global_stack();
  void get_entries_from_global_stack();

  // It pops and scans objects from the local queue. If partially is
  // true, then it stops when the queue size falls below a given limit.
  // If partially is false, then it stops when the queue is empty.
  void drain_local_queue(bool partially);
  // It moves entries from the global stack to the local queue and
  // drains the local queue. If partially is true, then it stops when
  // both the global stack and the local queue reach a given size. If
  // partially is false, it tries to empty them totally.
  void drain_global_stack(bool partially);
  // It keeps picking SATB buffers and processing them until no SATB
  // buffers are available.
  void drain_satb_buffers();

  // moves the local finger to a new location
  inline void move_finger_to(HeapWord* new_finger) {
    assert(new_finger >= _finger && new_finger < _region_limit, "invariant");
    _finger = new_finger;
  }

  CMTask(int task_num, ConcurrentMark *cm,
         size_t* marked_bytes, BitMap* card_bm,
         CMTaskQueue* task_queue, CMTaskQueueSet* task_queues);

  // it prints statistics associated with this task
  void print_stats();

#if _MARKING_STATS_
  void increase_objs_found_on_bitmap() { ++_objs_found_on_bitmap; }
#endif // _MARKING_STATS_
};
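
// Illustrative sketch: how a parallel worker might drive a CMTask during
// concurrent marking (hypothetical; the real loop belongs to the
// concurrent marking worker task):
//
//   CMTask* task = cm->task(worker_id);
//   task->record_start_time();
//   do {
//     task->do_marking_step(10.0 /* target_ms */,
//                           true /* do_stealing */,
//                           true /* do_termination */);
//     ...
//   } while (!cm->has_aborted() && task->has_aborted());
//   task->record_end_time();
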
// Class that's used to print out per-region liveness
// information. It's currently used at the end of marking and also
// after we sort the old regions at the end of the cleanup operation.
class G1PrintRegionLivenessInfoClosure: public HeapRegionClosure {
 private:
  outputStream* _out;

  // Accumulators for these values.
  size_t _total_used_bytes;
  size_t _total_capacity_bytes;
  size_t _total_prev_live_bytes;
  size_t _total_next_live_bytes;

  // These are set up when we come across a "starts humongous" region
  // (as this is where most of this information is stored, not in the
  // subsequent "continues humongous" regions). After that, for every
  // region in a given humongous region series we deduce the right
  // values for it by simply subtracting the appropriate amount from
  // these fields. All these values should reach 0 after we've visited
  // the last region in the series.
  size_t _hum_used_bytes;
  size_t _hum_capacity_bytes;
  size_t _hum_prev_live_bytes;
  size_t _hum_next_live_bytes;

  static double perc(size_t val, size_t total) {
    if (total == 0) {
      return 0.0;
    } else {
      return 100.0 * ((double) val / (double) total);
    }
  }

  static double bytes_to_mb(size_t val) {
    return (double) val / (double) M;
  }

  // See the .cpp file.
  size_t get_hum_bytes(size_t* hum_bytes);
  void get_hum_bytes(size_t* used_bytes, size_t* capacity_bytes,
                     size_t* prev_live_bytes, size_t* next_live_bytes);

 public:
  // The header and footer are printed in the constructor and
  // destructor respectively.
  G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name);
  virtual bool doHeapRegion(HeapRegion* r);
  ~G1PrintRegionLivenessInfoClosure();
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP