/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP

#include "gc_implementation/g1/heapRegionSets.hpp"
#include "utilities/taskqueue.hpp"

class G1CollectedHeap;
class CMTask;
typedef GenericTaskQueue<oop, mtGC>            CMTaskQueue;
typedef GenericTaskQueueSet<CMTaskQueue, mtGC> CMTaskQueueSet;

// Closure used by CM during concurrent reference discovery
// and reference processing (during remarking) to determine
// if a particular object is alive. It is primarily used
// to determine if referents of discovered reference objects
// are alive. An instance is also embedded into the
// reference processor as the _is_alive_non_header field.
class G1CMIsAliveClosure: public BoolObjectClosure {
  G1CollectedHeap* _g1;
 public:
  G1CMIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) { }

  void do_object(oop obj) {
    ShouldNotCallThis();
  }
  bool do_object_b(oop obj);
};

// A generic CM bit map.  This is essentially a wrapper around the BitMap
// class, with one bit per (1 << _shifter) HeapWords.

class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
 protected:
  HeapWord* _bmStartWord;      // base address of range covered by map
  size_t    _bmWordSize;       // map size (in #HeapWords covered)
  const int _shifter;          // map to char or bit
  VirtualSpace _virtual_space; // underlying storage for the bit map
  BitMap    _bm;               // the bit map itself

 public:
  // constructor
  CMBitMapRO(ReservedSpace rs, int shifter);

  enum { do_yield = true };

  // inquiries
  HeapWord* startWord()   const { return _bmStartWord; }
  size_t    sizeInWords() const { return _bmWordSize;  }
  // the following is one past the last word in space
  HeapWord* endWord()     const { return _bmStartWord + _bmWordSize; }

  // read marks

  bool isMarked(HeapWord* addr) const {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    return _bm.at(heapWordToOffset(addr));
  }

  // iteration
  inline bool iterate(BitMapClosure* cl, MemRegion mr);
  inline bool iterate(BitMapClosure* cl);

  // Return the address corresponding to the next marked bit at or after
  // "addr", and before "limit", if "limit" is non-NULL.  If there is no
  // such bit, returns "limit" if that is non-NULL, or else "endWord()".
  HeapWord* getNextMarkedWordAddress(HeapWord* addr,
                                     HeapWord* limit = NULL) const;
  // Return the address corresponding to the next unmarked bit at or after
  // "addr", and before "limit", if "limit" is non-NULL.  If there is no
  // such bit, returns "limit" if that is non-NULL, or else "endWord()".
  HeapWord* getNextUnmarkedWordAddress(HeapWord* addr,
                                       HeapWord* limit = NULL) const;
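
  // Illustrative usage sketch (not part of the original header): walking
  // every marked address in the covered range with getNextMarkedWordAddress().
  // `bm` and `process` are hypothetical placeholders.
  //
  //   HeapWord* cur = bm->startWord();
  //   HeapWord* end = bm->endWord();
  //   while (cur < end) {
  //     cur = bm->getNextMarkedWordAddress(cur, end);
  //     if (cur == end) break;   // no marked bit at or after cur
  //     process(cur);            // handle the mark that starts here
  //     cur = bm->nextWord(cur); // advance past the bit just handled
  //   }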

  // conversion utilities
  // XXX Fix these so that offsets are size_t's...
  HeapWord* offsetToHeapWord(size_t offset) const {
    return _bmStartWord + (offset << _shifter);
  }
  size_t heapWordToOffset(HeapWord* addr) const {
    return pointer_delta(addr, _bmStartWord) >> _shifter;
  }
  int heapWordDiffToOffsetDiff(size_t diff) const;
  HeapWord* nextWord(HeapWord* addr) {
    return offsetToHeapWord(heapWordToOffset(addr) + 1);
  }

  // debugging
  NOT_PRODUCT(bool covers(ReservedSpace rs) const;)
};

class CMBitMap : public CMBitMapRO {

 public:
  // constructor
  CMBitMap(ReservedSpace rs, int shifter) :
    CMBitMapRO(rs, shifter) {}

  // write marks
  void mark(HeapWord* addr) {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    _bm.set_bit(heapWordToOffset(addr));
  }
  void clear(HeapWord* addr) {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    _bm.clear_bit(heapWordToOffset(addr));
  }
  bool parMark(HeapWord* addr) {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    return _bm.par_set_bit(heapWordToOffset(addr));
  }
  bool parClear(HeapWord* addr) {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    return _bm.par_clear_bit(heapWordToOffset(addr));
  }
  void markRange(MemRegion mr);
  void clearAll();
  void clearRange(MemRegion mr);

  // Starting at the bit corresponding to "addr" (inclusive), find the next
  // "1" bit, if any.  This bit starts some run of consecutive "1"'s; find
  // the end of this run (stopping at "end_addr").  Return the MemRegion
  // covering from the start of the region corresponding to the first bit
  // of the run to the end of the region corresponding to the last bit of
  // the run.  If there is no "1" bit at or after "addr", return an empty
  // MemRegion.
  MemRegion getAndClearMarkedRegion(HeapWord* addr, HeapWord* end_addr);
};
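
// Illustrative note (not part of the original header): parMark() returns
// true only for the thread whose CAS actually set the bit, so it doubles
// as a cheap claim mechanism during parallel marking. `bm` is a
// hypothetical CMBitMap pointer.
//
//   if (bm->parMark(addr)) {
//     // this thread set the bit first and owns any follow-up work,
//     // e.g. pushing the newly-marked object on a queue
//   }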

// Represents a marking stack used by the CM collector.
// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
class CMMarkStack VALUE_OBJ_CLASS_SPEC {
  ConcurrentMark* _cm;
  oop*   _base;        // bottom of stack
  jint   _index;       // one more than last occupied index
  jint   _capacity;    // max #elements
  jint   _saved_index; // value of _index saved at start of GC
  NOT_PRODUCT(jint _max_depth;)  // max depth plumbed during run
  VirtualSpace _virtual_space;   // Underlying backing store for actual stack

  bool   _overflow;
  DEBUG_ONLY(bool _drain_in_progress;)
  DEBUG_ONLY(bool _drain_in_progress_yields;)

 public:
  CMMarkStack(ConcurrentMark* cm);
  ~CMMarkStack();

  void allocate(size_t size);

  oop pop() {
    if (!isEmpty()) {
      return _base[--_index];
    }
    return NULL;
  }

  // If overflow happens, don't do the push, and record the overflow.
  // *Requires* that "ptr" is already marked.
  void push(oop ptr) {
    if (isFull()) {
      // Record overflow.
      _overflow = true;
      return;
    } else {
      _base[_index++] = ptr;
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
    }
  }
  // Non-block impl.  Note: concurrency is allowed only with other
  // "par_push" operations, not with "pop" or "drain".  We would need
  // parallel versions of them if such concurrency was desired.
  void par_push(oop ptr);

  // Pushes the first "n" elements of "ptr_arr" on the stack.
  // Non-block impl.  Note: concurrency is allowed only with other
  // "par_adjoin_arr" or "push" operations, not with "pop" or "drain".
  void par_adjoin_arr(oop* ptr_arr, int n);

  // Pushes the first "n" elements of "ptr_arr" on the stack.
  // Locking impl: concurrency is allowed only with
  // "par_push_arr" and/or "par_pop_arr" operations, which use the same
  // locking strategy.
  void par_push_arr(oop* ptr_arr, int n);

  // If returns false, the array was empty.  Otherwise, removes up to "max"
  // elements from the stack, and transfers them to "ptr_arr" in an
  // unspecified order.  The actual number transferred is given in "n" ("n
  // == 0" is deliberately redundant with the return value.)  Locking impl:
  // concurrency is allowed only with "par_push_arr" and/or "par_pop_arr"
  // operations, which use the same locking strategy.
  bool par_pop_arr(oop* ptr_arr, int max, int* n);

  // Drain the mark stack, applying the given closure to all fields of
  // objects on the stack.  (That is, continue until the stack is empty,
  // even if closure applications add entries to the stack.)  The "bm"
  // argument, if non-null, may be used to verify that only marked objects
  // are on the mark stack.  If "yield_after" is "true", then the
  // concurrent marker performing the drain offers to yield after
  // processing each object.  If a yield occurs, stops the drain operation
  // and returns false.  Otherwise, returns true.
  template<class OopClosureClass>
  bool drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after = false);

  bool isEmpty()    { return _index == 0; }
  bool isFull()     { return _index == _capacity; }
  int  maxElems()   { return _capacity; }

  bool overflow() { return _overflow; }
  void clear_overflow() { _overflow = false; }

  int size() { return _index; }

  void setEmpty()   { _index = 0; clear_overflow(); }

  // Record the current index.
  void note_start_of_gc();

  // Make sure that we have not added any entries to the stack during GC.
  void note_end_of_gc();

  // iterate over the oops in the mark stack, up to the bound recorded via
  // the call above.
  void oops_do(OopClosure* f);
};
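
// Illustrative drain sketch (not part of the original header): `MyOopClosure`
// and `ms` are hypothetical; the bitmap argument lets drain() verify that
// only marked objects are on the stack.
//
//   MyOopClosure cl;
//   if (!ms->drain(&cl, nextMarkBitMap, true /* yield_after */)) {
//     // a yield interrupted the drain; resume once it is safe to do so
//   }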

class ForceOverflowSettings VALUE_OBJ_CLASS_SPEC {
 private:
#ifndef PRODUCT
  uintx _num_remaining;
  bool _force;
#endif // !defined(PRODUCT)

 public:
  void init() PRODUCT_RETURN;
  void update() PRODUCT_RETURN;
  bool should_force() PRODUCT_RETURN_( return false; );
};

// this will enable a variety of different statistics per GC task
#define _MARKING_STATS_ 0
// this will enable the higher verbose levels
#define _MARKING_VERBOSE_ 0

#if _MARKING_STATS_
#define statsOnly(statement)  \
do {                          \
  statement ;                 \
} while (0)
#else // _MARKING_STATS_
#define statsOnly(statement)  \
do {                          \
} while (0)
#endif // _MARKING_STATS_

typedef enum {
  no_verbose  = 0,   // verbose turned off
  stats_verbose,     // only prints stats at the end of marking
  low_verbose,       // low verbose, mostly per region and per major event
  medium_verbose,    // a bit more detailed than low
  high_verbose       // per object verbose
} CMVerboseLevel;

class YoungList;

// Root Regions are regions that are not empty at the beginning of a
// marking cycle and which we might collect during an evacuation pause
// while the cycle is active. Given that, during evacuation pauses, we
// do not copy objects that are explicitly marked, what we have to do
// for the root regions is to scan them and mark all objects reachable
// from them. According to the SATB assumptions, we only need to visit
// each object once during marking. So, as long as we finish this scan
// before the next evacuation pause, we can copy the objects from the
// root regions without having to mark them or do anything else to them.
//
// Currently, we only support root region scanning once (at the start
// of the marking cycle) and the root regions are all the survivor
// regions populated during the initial-mark pause.
class CMRootRegions VALUE_OBJ_CLASS_SPEC {
 private:
  YoungList*           _young_list;
  ConcurrentMark*      _cm;

  volatile bool        _scan_in_progress;
  volatile bool        _should_abort;
  HeapRegion* volatile _next_survivor;

 public:
  CMRootRegions();
  // We actually do most of the initialization in this method.
  void init(G1CollectedHeap* g1h, ConcurrentMark* cm);

  // Reset the claiming / scanning of the root regions.
  void prepare_for_scan();

  // Forces claim_next() to return NULL so that the iteration aborts early.
  void abort() { _should_abort = true; }

  // Return true if CM threads are actively scanning root regions,
  // false otherwise.
  bool scan_in_progress() { return _scan_in_progress; }

  // Claim the next root region to scan atomically, or return NULL if
  // all have been claimed.
  HeapRegion* claim_next();

  // Flag that we're done with root region scanning and notify anyone
  // who's waiting on it. If aborted is false, assume that all regions
  // have been claimed.
  void scan_finished();

  // If CM threads are still scanning root regions, wait until they
  // are done. Return true if we had to wait, false otherwise.
  bool wait_until_scan_finished();
};
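
// Illustrative claiming loop (not part of the original header): how a
// worker drains the root regions. `scan_root_region` stands in for the
// actual per-region scanning code.
//
//   HeapRegion* hr = root_regions->claim_next();
//   while (hr != NULL) {
//     scan_root_region(hr);            // mark everything reachable from hr
//     hr = root_regions->claim_next();
//   }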

class ConcurrentMarkThread;

class ConcurrentMark: public CHeapObj<mtGC> {
  friend class ConcurrentMarkThread;
  friend class CMTask;
  friend class CMBitMapClosure;
  friend class CMGlobalObjectClosure;
  friend class CMRemarkTask;
  friend class CMConcurrentMarkingTask;
  friend class G1ParNoteEndTask;
  friend class CalcLiveObjectsClosure;
  friend class G1CMRefProcTaskProxy;
  friend class G1CMRefProcTaskExecutor;
  friend class G1CMParKeepAliveAndDrainClosure;
  friend class G1CMParDrainMarkingStackClosure;

 protected:
  ConcurrentMarkThread* _cmThread;   // the thread doing the work
  G1CollectedHeap*      _g1h;        // the heap
  uint                  _parallel_marking_threads; // the number of marking
                                                   // threads we're using
  uint                  _max_parallel_marking_threads; // max number of marking
                                                       // threads we'll ever use
  double                _sleep_factor; // how much we have to sleep, with
                                       // respect to the work we just did, to
                                       // meet the marking overhead goal
  double                _marking_task_overhead; // marking target overhead for
                                                // a single task

  // same as the two above, but for the cleanup task
  double                _cleanup_sleep_factor;
  double                _cleanup_task_overhead;

  FreeRegionList        _cleanup_list;

  // Concurrent marking support structures
  CMBitMap                _markBitMap1;
  CMBitMap                _markBitMap2;
  CMBitMapRO*             _prevMarkBitMap; // completed mark bitmap
  CMBitMap*               _nextMarkBitMap; // under-construction mark bitmap

  BitMap                  _region_bm;
  BitMap                  _card_bm;

  VirtualSpace            _virtual_space; // Backing store for worker liveness
                                          // counting structures

  // Heap bounds
  HeapWord*               _heap_start;
  HeapWord*               _heap_end;

  // Root region tracking and claiming.
  CMRootRegions           _root_regions;

  // For gray objects
  CMMarkStack             _markStack; // Grey objects behind global finger.
  HeapWord* volatile      _finger;    // the global finger, region aligned,
                                      // always points to the end of the
                                      // last claimed region

  // marking tasks
  uint                    _max_task_num; // maximum number of marking tasks
  uint                    _active_tasks; // number of currently active tasks
  CMTask**                _tasks;        // task queue array (max_task_num len)
  CMTaskQueueSet*         _task_queues;  // task queue set
  ParallelTaskTerminator  _terminator;   // for termination

  // Two sync barriers that are used to synchronise tasks when an
  // overflow occurs. The algorithm is the following. All tasks enter
  // the first one to ensure that they have all stopped manipulating
  // the global data structures. After they exit it, they re-initialise
  // their data structures and task 0 re-initialises the global data
  // structures. Then, they enter the second sync barrier. This
  // ensures that no task starts doing work before all data
  // structures (local and global) have been re-initialised. When they
  // exit it, they are free to start working again.
  WorkGangBarrierSync     _first_overflow_barrier_sync;
  WorkGangBarrierSync     _second_overflow_barrier_sync;
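
  // Sketch of the overflow protocol described above (illustrative, not part
  // of the original header; the reinitialisation calls are placeholders):
  //
  //   _first_overflow_barrier_sync.enter();   // everyone has stopped touching
  //                                           // the global structures
  //   reinit_local_structures();              // each task, for itself
  //   if (task_num == 0) {
  //     clear_marking_state();                // task 0 resets the globals
  //   }
  //   _second_overflow_barrier_sync.enter();  // nobody resumes work early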

  // this is set by any task, when an overflow on the global data
  // structures is detected.
  volatile bool           _has_overflown;
  // true: marking is concurrent, false: we're in remark
  volatile bool           _concurrent;
  // set at the end of a Full GC so that marking aborts
  volatile bool           _has_aborted;

  // used when remark aborts due to an overflow to indicate that
  // another concurrent marking phase should start
  volatile bool           _restart_for_overflow;

  // This is true from the very start of concurrent marking until the
  // point when all the tasks complete their work. It is really used
  // to determine the window between the end of concurrent marking and
  // the time of remark.
  volatile bool           _concurrent_marking_in_progress;

  // verbose level
  CMVerboseLevel          _verbose_level;

  // All of these times are in ms.
  NumberSeq _init_times;
  NumberSeq _remark_times;
  NumberSeq _remark_mark_times;
  NumberSeq _remark_weak_ref_times;
  NumberSeq _cleanup_times;
  double    _total_counting_time;
  double    _total_rs_scrub_time;

  double*   _accum_task_vtime;   // accumulated task vtime

  FlexibleWorkGang* _parallel_workers;

  ForceOverflowSettings _force_overflow_conc;
  ForceOverflowSettings _force_overflow_stw;

  void weakRefsWork(bool clear_all_soft_refs);

  void swapMarkBitMaps();

  // It resets the global marking data structures, as well as the
  // task local ones; should be called during initial mark.
  void reset();
  // It resets all the marking data structures.
  void clear_marking_state(bool clear_overflow = true);

  // It should be called to indicate which phase we're in (concurrent
  // mark or remark) and how many threads are currently active.
  void set_phase(uint active_tasks, bool concurrent);
  // We do this after we're done with marking so that the marking data
  // structures are initialised to a sensible and predictable state.
  void set_non_marking_state();

  // prints all gathered CM-related statistics
  void print_stats();

  bool cleanup_list_is_empty() {
    return _cleanup_list.is_empty();
  }

  // accessor methods
  uint parallel_marking_threads() { return _parallel_marking_threads; }
  uint max_parallel_marking_threads() { return _max_parallel_marking_threads;}
  double sleep_factor()             { return _sleep_factor; }
  double marking_task_overhead()    { return _marking_task_overhead;}
  double cleanup_sleep_factor()     { return _cleanup_sleep_factor; }
  double cleanup_task_overhead()    { return _cleanup_task_overhead;}

  HeapWord*               finger()        { return _finger;   }
  bool                    concurrent()    { return _concurrent; }
  uint                    active_tasks()  { return _active_tasks; }
  ParallelTaskTerminator* terminator()    { return &_terminator; }

  // It claims the next available region to be scanned by a marking
  // task. It might return NULL if the next region is empty or we have
  // run out of regions. In the latter case, out_of_regions()
  // determines whether we've really run out of regions or the task
  // should call claim_region() again. This might seem a bit
  // awkward. Originally, the code was written so that claim_region()
  // either successfully returned with a non-empty region or there
  // were no more regions to be claimed. The problem with this was
  // that, in certain circumstances, it iterated over large chunks of
  // the heap finding only empty regions and, while it was working, it
  // was preventing the calling task from calling its regular clock
  // method. So, this way, each task will spend very little time in
  // claim_region() and is allowed to call the regular clock method
  // frequently.
  HeapRegion* claim_region(int task);

  // It determines whether we've run out of regions to scan.
  bool        out_of_regions() { return _finger == _heap_end; }
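
  // Illustrative claim loop (not part of the original header): how a task
  // combines claim_region() with out_of_regions(). `task_num` and
  // `scan_heap_region` are placeholders.
  //
  //   HeapRegion* hr = claim_region(task_num);
  //   while (hr != NULL) {
  //     scan_heap_region(hr);
  //     hr = claim_region(task_num);
  //   }
  //   if (!out_of_regions()) {
  //     // claim_region() bailed out early (e.g. to let the task run its
  //     // regular clock method); it is safe to call it again later
  //   }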

  // Returns the task with the given id
  CMTask* task(int id) {
    assert(0 <= id && id < (int) _active_tasks,
           "task id not within active bounds");
    return _tasks[id];
  }

  // Returns the task queue with the given id
  CMTaskQueue* task_queue(int id) {
    assert(0 <= id && id < (int) _active_tasks,
           "task queue id not within active bounds");
    return (CMTaskQueue*) _task_queues->queue(id);
  }

  // Returns the task queue set
  CMTaskQueueSet* task_queues()  { return _task_queues; }

  // Access / manipulation of the overflow flag which is set to
  // indicate that the global stack has overflown
  bool has_overflown()           { return _has_overflown; }
  void set_has_overflown()       { _has_overflown = true; }
  void clear_has_overflown()     { _has_overflown = false; }
  bool restart_for_overflow()    { return _restart_for_overflow; }

  bool has_aborted()             { return _has_aborted; }

  // Methods to enter the two overflow sync barriers
  void enter_first_sync_barrier(int task_num);
  void enter_second_sync_barrier(int task_num);

  ForceOverflowSettings* force_overflow_conc() {
    return &_force_overflow_conc;
  }

  ForceOverflowSettings* force_overflow_stw() {
    return &_force_overflow_stw;
  }

  ForceOverflowSettings* force_overflow() {
    if (concurrent()) {
      return force_overflow_conc();
    } else {
      return force_overflow_stw();
    }
  }

  // Live Data Counting data structures...
  // These data structures are initialized at the start of
  // marking. They are written to while marking is active.
  // They are aggregated during remark; the aggregated values
  // are then used to populate the _region_bm, _card_bm, and
  // the total live bytes, which are then subsequently updated
  // during cleanup.

  // An array of bitmaps (one bit map per task). Each bitmap
  // is used to record the cards spanned by the live objects
  // marked by that task/worker.
  BitMap*  _count_card_bitmaps;

  // Used to record the number of marked live bytes
  // (for each region, by worker thread).
  size_t** _count_marked_bytes;

  // Card index of the bottom of the G1 heap. Used for biasing indices into
  // the card bitmaps.
  intptr_t _heap_bottom_card_num;

 public:
  // Manipulation of the global mark stack.
  // Notice that the first mark_stack_push is CAS-based, whereas the
  // two below are Mutex-based. This is OK since the first one is only
  // called during evacuation pauses and doesn't compete with the
  // other two (which are called by the marking tasks during
  // concurrent marking or remark).
  bool mark_stack_push(oop p) {
    _markStack.par_push(p);
    if (_markStack.overflow()) {
      set_has_overflown();
      return false;
    }
    return true;
  }
  bool mark_stack_push(oop* arr, int n) {
    _markStack.par_push_arr(arr, n);
    if (_markStack.overflow()) {
      set_has_overflown();
      return false;
    }
    return true;
  }
  void mark_stack_pop(oop* arr, int max, int* n) {
    _markStack.par_pop_arr(arr, max, n);
  }
  size_t mark_stack_size()                { return _markStack.size(); }
  size_t partial_mark_stack_size_target() { return _markStack.maxElems()/3; }
  bool mark_stack_overflow()              { return _markStack.overflow(); }
  bool mark_stack_empty()                 { return _markStack.isEmpty(); }
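
  // Illustrative caller pattern (not part of the original header): reacting
  // to global-stack overflow when pushing. `obj` is a placeholder.
  //
  //   if (!mark_stack_push(obj)) {
  //     // the global stack overflowed; has_overflown() is now set and the
  //     // marking tasks will go through the overflow barrier protocol
  //   }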

  CMRootRegions* root_regions() { return &_root_regions; }

  bool concurrent_marking_in_progress() {
    return _concurrent_marking_in_progress;
  }
  void set_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = true;
  }
  void clear_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = false;
  }

  void update_accum_task_vtime(int i, double vtime) {
    _accum_task_vtime[i] += vtime;
  }

  double all_task_accum_vtime() {
    double ret = 0.0;
    for (int i = 0; i < (int)_max_task_num; ++i)
      ret += _accum_task_vtime[i];
    return ret;
  }

  // Attempts to steal an object from the task queues of other tasks
  bool try_stealing(int task_num, int* hash_seed, oop& obj) {
    return _task_queues->steal(task_num, hash_seed, obj);
  }
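
  // Illustrative stealing loop (not part of the original header): a task
  // that has drained its own queue keeps stealing until nothing is left.
  // `task_num`, `seed` and `scan` are placeholders.
  //
  //   oop obj;
  //   while (try_stealing(task_num, &seed, obj)) {
  //     scan(obj);   // process the stolen entry
  //   }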

  ConcurrentMark(ReservedSpace heap_rs, uint max_regions);
  ~ConcurrentMark();

  ConcurrentMarkThread* cmThread() { return _cmThread; }

  CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; }
  CMBitMap*   nextMarkBitMap() const { return _nextMarkBitMap; }

  // Returns the number of GC threads to be used in a concurrent
  // phase based on the number of GC threads being used in a STW
  // phase.
  uint scale_parallel_threads(uint n_par_threads);

  // Calculates the number of GC threads to be used in a concurrent phase.
  uint calc_parallel_marking_threads();

  // The following three are interaction between CM and
  // G1CollectedHeap

  // This notifies CM that a root during initial-mark needs to be
  // grayed. It is MT-safe. word_size is the size of the object in
  // words. It is passed explicitly as sometimes we cannot calculate
  // it from the given object because it might be in an inconsistent
  // state (e.g., in to-space and being copied). So the caller is
  // responsible for dealing with this issue (e.g., get the size from
  // the from-space image when the to-space image might be
  // inconsistent) and always passing the size. hr is the region that
  // contains the object and it's passed optionally from callers who
  // might already have it (no point in recalculating it).
  inline void grayRoot(oop obj, size_t word_size,
                       uint worker_id, HeapRegion* hr = NULL);

  // It iterates over the heap and, for each object it comes across, it
  // will dump the contents of its reference fields, as well as
  // liveness information for the object and its referents. The dump
  // will be written to a file with the following name:
  // G1PrintReachableBaseFile + "." + str.
  // vo decides whether the prev (vo == UsePrevMarking), the next
  // (vo == UseNextMarking) marking information, or the mark word
  // (vo == UseMarkWord) will be used to determine the liveness of
  // each object / referent.
  // If all is true, all objects in the heap will be dumped, otherwise
  // only the live ones. In the dump the following symbols / abbreviations
  // are used:
  //   M : an explicitly live object (its bitmap bit is set)
  //   > : an implicitly live object (over tams)
  //   O : an object outside the G1 heap (typically: in the perm gen)
  //   NOT : a reference field whose referent is not live
  //   AND MARKED : indicates that an object is both explicitly and
  //   implicitly live (it should be one or the other, not both)
  void print_reachable(const char* str,
                       VerifyOption vo, bool all) PRODUCT_RETURN;

  // Clear the next marking bitmap (will be called concurrently).
  void clearNextBitmap();

  // These two do the work that needs to be done before and after the
  // initial root checkpoint. Since this checkpoint can be done at two
  // different points (i.e. an explicit pause or piggy-backed on a
  // young collection), then it's nice to be able to easily share the
  // pre/post code. It might be the case that we can put everything in
  // the post method. TP
  void checkpointRootsInitialPre();
  void checkpointRootsInitialPost();

  // Scan all the root regions and mark everything reachable from
  // them.
  void scanRootRegions();

  // Scan a single root region and mark everything reachable from it.
  void scanRootRegion(HeapRegion* hr, uint worker_id);

  // Do concurrent phase of marking, to a tentative transitive closure.
  void markFromRoots();

  void checkpointRootsFinal(bool clear_all_soft_refs);
  void checkpointRootsFinalWork();
  void cleanup();
  void completeCleanup();

  // Mark in the previous bitmap. NB: this is usually read-only, so use
  // this carefully!
  inline void markPrev(oop p);

  // Clears marks for all objects in the given range, for the prev,
  // next, or both bitmaps. NB: the previous bitmap is usually
  // read-only, so use this carefully!
  void clearRangePrevBitmap(MemRegion mr);
  void clearRangeNextBitmap(MemRegion mr);
  void clearRangeBothBitmaps(MemRegion mr);

  // Notify data structures that a GC has started.
  void note_start_of_gc() {
    _markStack.note_start_of_gc();
  }

  // Notify data structures that a GC is finished.
  void note_end_of_gc() {
    _markStack.note_end_of_gc();
  }

  // Verify that there are no CSet oops on the stacks (taskqueues /
  // global mark stack), enqueued SATB buffers, per-thread SATB
  // buffers, and fingers (global / per-task). The boolean parameters
  // decide which of the above data structures to verify. If marking
  // is not in progress, it's a no-op.
  void verify_no_cset_oops(bool verify_stacks,
                           bool verify_enqueued_buffers,
                           bool verify_thread_buffers,
                           bool verify_fingers) PRODUCT_RETURN;

  // It is called at the end of an evacuation pause during marking so
  // that CM is notified of where the new end of the heap is. It
  // doesn't do anything if concurrent_marking_in_progress() is false,
  // unless the force parameter is true.
  void update_g1_committed(bool force = false);

  bool isMarked(oop p) const {
    assert(p != NULL && p->is_oop(), "expected an oop");
    HeapWord* addr = (HeapWord*)p;
    assert(addr >= _nextMarkBitMap->startWord() &&
           addr < _nextMarkBitMap->endWord(), "in a region");

    return _nextMarkBitMap->isMarked(addr);
  }

  inline bool not_yet_marked(oop p) const;

  // XXX Debug code
  bool containing_card_is_marked(void* p);
  bool containing_cards_are_marked(void* start, void* last);

  bool isPrevMarked(oop p) const {
    assert(p != NULL && p->is_oop(), "expected an oop");
    HeapWord* addr = (HeapWord*)p;
    assert(addr >= _prevMarkBitMap->startWord() &&
           addr < _prevMarkBitMap->endWord(), "in a region");

    return _prevMarkBitMap->isMarked(addr);
  }

  inline bool do_yield_check(uint worker_i = 0);
  inline bool should_yield();

  // Called to abort the marking cycle after a Full GC takes place.
  void abort();

  // This prints the global/local fingers. It is used for debugging.
  NOT_PRODUCT(void print_finger();)

  void print_summary_info();

  void print_worker_threads_on(outputStream* st) const;

  // The following indicate whether a given verbose level has been
  // set. Notice that anything above stats is conditional to
  // _MARKING_VERBOSE_ having been set to 1
  bool verbose_stats() {
    return _verbose_level >= stats_verbose;
  }
  bool verbose_low() {
    return _MARKING_VERBOSE_ && _verbose_level >= low_verbose;
  }
  bool verbose_medium() {
    return _MARKING_VERBOSE_ && _verbose_level >= medium_verbose;
  }
  bool verbose_high() {
    return _MARKING_VERBOSE_ && _verbose_level >= high_verbose;
  }

  // Counting data structure accessors

  // Returns the card number of the bottom of the G1 heap.
  // Used in biasing indices into accounting card bitmaps.
  intptr_t heap_bottom_card_num() const {
    return _heap_bottom_card_num;
  }

  // Returns the card bitmap for a given task or worker id.
  BitMap* count_card_bitmap_for(uint worker_id) {
    assert(0 <= worker_id && worker_id < _max_task_num, "oob");
    assert(_count_card_bitmaps != NULL, "uninitialized");
    BitMap* task_card_bm = &_count_card_bitmaps[worker_id];
    assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
    return task_card_bm;
  }

  // Returns the array containing the marked bytes for each region,
  // for the given worker or task id.
  size_t* count_marked_bytes_array_for(uint worker_id) {
    assert(0 <= worker_id && worker_id < _max_task_num, "oob");
    assert(_count_marked_bytes != NULL, "uninitialized");
    size_t* marked_bytes_array = _count_marked_bytes[worker_id];
    assert(marked_bytes_array != NULL, "uninitialized");
    return marked_bytes_array;
  }

  // Returns the index in the liveness accounting card table bitmap
  // for the given address
  inline BitMap::idx_t card_bitmap_index_for(HeapWord* addr);
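
  // Worked example (illustrative, not part of the original header): the
  // card bitmap index for an address is its card number rebased so that
  // the heap's bottom card maps to bit 0, conceptually:
  //
  //   intptr_t card_num =
  //     intptr_t(uintptr_t(addr) >> CardTableModRefBS::card_shift);
  //   BitMap::idx_t index = card_num - heap_bottom_card_num();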

  // Counts the size of the given memory region in the given
  // marked_bytes array slot for the given HeapRegion.
  // Sets the bits in the given card bitmap that are associated with the
  // cards that are spanned by the memory region.
  inline void count_region(MemRegion mr, HeapRegion* hr,
                           size_t* marked_bytes_array,
                           BitMap* task_card_bm);

  // Counts the given memory region in the task/worker counting
  // data structures for the given worker id.
  inline void count_region(MemRegion mr, HeapRegion* hr, uint worker_id);

  // Counts the given memory region in the task/worker counting
  // data structures for the given worker id.
  inline void count_region(MemRegion mr, uint worker_id);

  // Counts the given object in the given task/worker counting
  // data structures.
  inline void count_object(oop obj, HeapRegion* hr,
                           size_t* marked_bytes_array,
                           BitMap* task_card_bm);

  // Counts the given object in the task/worker counting data
  // structures for the given worker id.
  inline void count_object(oop obj, HeapRegion* hr, uint worker_id);

  // Attempts to mark the given object and, if successful, counts
  // the object in the given task/worker counting structures.
  inline bool par_mark_and_count(oop obj, HeapRegion* hr,
                                 size_t* marked_bytes_array,
                                 BitMap* task_card_bm);

  // Attempts to mark the given object and, if successful, counts
  // the object in the task/worker counting structures for the
  // given worker id.
  inline bool par_mark_and_count(oop obj, size_t word_size,
                                 HeapRegion* hr, uint worker_id);

  // Attempts to mark the given object and, if successful, counts
  // the object in the task/worker counting structures for the
  // given worker id.
  inline bool par_mark_and_count(oop obj, HeapRegion* hr, uint worker_id);

  // Similar to the above routine but we don't know the heap region that
  // contains the object to be marked/counted, which this routine looks up.
  inline bool par_mark_and_count(oop obj, uint worker_id);

  // Similar to the above routine but there are times when we cannot
  // safely calculate the size of obj due to races and we, therefore,
  // pass the size in as a parameter. It is the caller's responsibility
  // to ensure that the size passed in for obj is valid.
  inline bool par_mark_and_count(oop obj, size_t word_size, uint worker_id);

  // Unconditionally mark the given object, and unconditionally count
  // the object in the counting structures for worker id 0.
  // Should *not* be called from parallel code.
  inline bool mark_and_count(oop obj, HeapRegion* hr);

  // Similar to the above routine but we don't know the heap region that
  // contains the object to be marked/counted, which this routine looks up.
  // Should *not* be called from parallel code.
  inline bool mark_and_count(oop obj);

 protected:
  // Clear all the per-task bitmaps and arrays used to store the
  // counting data.
  void clear_all_count_data();

  // Aggregates the counting data for each worker/task
  // that was constructed while marking. Also sets
  // the amount of marked bytes for each region and
  // the top at concurrent mark count.
  void aggregate_count_data();

  // Verification routine
  void verify_count_data();
};
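
// Illustrative composition (not part of the original header): the
// par_mark_and_count() family is conceptually a parMark() on the next
// bitmap followed, only on success, by the matching count_object() call:
//
//   if (_nextMarkBitMap->parMark((HeapWord*) obj)) {
//     count_object(obj, hr, marked_bytes_array, task_card_bm);
//     return true;
//   }
//   return false;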

// A class representing a marking task.
class CMTask : public TerminatorTerminator {
 private:
  enum PrivateConstants {
    // the regular clock call is called once the number of scanned
    // words reaches this limit
    words_scanned_period          = 12*1024,
    // the regular clock call is called once the number of visited
    // references reaches this limit
    refs_reached_period           = 384,
    // initial value for the hash seed, used in the work stealing code
    init_hash_seed                = 17,
    // how many entries will be transferred between global stack and
    // local queues
    global_stack_transfer_size    = 16
  };

  int                         _task_id;
  G1CollectedHeap*            _g1h;
  ConcurrentMark*             _cm;
  CMBitMap*                   _nextMarkBitMap;
  // the task queue of this task
  CMTaskQueue*                _task_queue;
 private:
  // the task queue set---needed for stealing
  CMTaskQueueSet*             _task_queues;
  // indicates whether the task has been claimed---this is only for
  // debugging purposes
  bool                        _claimed;

  // number of calls to this task
  int                         _calls;

  // when the virtual timer reaches this time, the marking step should
  // exit
  double                      _time_target_ms;
  // the start time of the current marking step
  double                      _start_time_ms;

  // the oop closure used for iterations over oops
  G1CMOopClosure*             _cm_oop_closure;

  // the region this task is scanning, NULL if we're not scanning any
  HeapRegion*                 _curr_region;
  // the local finger of this task, NULL if we're not scanning a region
  HeapWord*                   _finger;
  // limit of the region this task is scanning, NULL if we're not scanning one
  HeapWord*                   _region_limit;

  // the number of words this task has scanned
  size_t                      _words_scanned;
  // When _words_scanned reaches this limit, the regular clock is
  // called. Notice that this might be decreased under certain
  // circumstances (i.e. when we believe that we did an expensive
  // operation).
  size_t                      _words_scanned_limit;
  // the initial value of _words_scanned_limit (i.e. what it was
  // before it was decreased).
  size_t                      _real_words_scanned_limit;

  // the number of references this task has visited
  size_t                      _refs_reached;
  // When _refs_reached reaches this limit, the regular clock is
  // called. Notice that this might be decreased under certain
  // circumstances (i.e. when we believe that we did an expensive
  // operation).
  size_t                      _refs_reached_limit;
  // the initial value of _refs_reached_limit (i.e. what it was before
  // it was decreased).
  size_t                      _real_refs_reached_limit;

  // used by the work stealing stuff
  int                         _hash_seed;
  // if this is true, then the task has aborted for some reason
  bool                        _has_aborted;
  // set when the task aborts because it has met its time quota
  bool                        _has_timed_out;
  // true when we're draining SATB buffers; this avoids the task
  // aborting due to SATB buffers being available (as we're already
  // dealing with them)
  bool                        _draining_satb_buffers;

  // number sequence of past step times
  NumberSeq                   _step_times_ms;
  // elapsed time of this task
  double                      _elapsed_time_ms;
  // termination time of this task
  double                      _termination_time_ms;
  // when this task got into the termination protocol
  double                      _termination_start_time_ms;

  // true when the task is during a concurrent phase, false when it is
  // in the remark phase (so, in the latter case, we do not have to
  // check all the things that we have to check during the concurrent
  // phase, i.e. SATB buffer availability...)
  bool                        _concurrent;

  TruncatedSeq                _marking_step_diffs_ms;

  // Counting data structures. Embedding the task's marked_bytes_array
  // and card bitmap into the actual task saves having to go through
  // the ConcurrentMark object.
  size_t*                     _marked_bytes_array;
  BitMap*                     _card_bm;

  // LOTS of statistics related to this task
#if _MARKING_STATS_
  NumberSeq                   _all_clock_intervals_ms;
  double                      _interval_start_time_ms;

  int                         _aborted;
  int                         _aborted_overflow;
  int                         _aborted_cm_aborted;
  int                         _aborted_yield;
  int                         _aborted_timed_out;
  int                         _aborted_satb;
  int                         _aborted_termination;

  int                         _steal_attempts;
  int                         _steals;

  int                         _clock_due_to_marking;
  int                         _clock_due_to_scanning;

  int                         _local_pushes;
  int                         _local_pops;
  int                         _local_max_size;
  int                         _objs_scanned;

  int                         _global_pushes;
  int                         _global_pops;
  int                         _global_max_size;

  int                         _global_transfers_to;
  int                         _global_transfers_from;

  int                         _regions_claimed;
  int                         _objs_found_on_bitmap;

  int                         _satb_buffers_processed;
#endif // _MARKING_STATS_

  // it updates the local fields after this task has claimed
  // a new region to scan
  void setup_for_region(HeapRegion* hr);
  // it brings up-to-date the limit of the region
  void update_region_limit();

  // called when either the words scanned or the refs visited limit
  // has been reached
  void reached_limit();
  // recalculates the words scanned and refs visited limits
  void recalculate_limits();
  // decreases the words scanned and refs visited limits when we reach
  // an expensive operation
  void decrease_limits();
  // it checks whether the words scanned or refs visited reached their
  // respective limit and calls reached_limit() if they have
  void check_limits() {
    if (_words_scanned >= _words_scanned_limit ||
        _refs_reached >= _refs_reached_limit) {
      reached_limit();
    }
  }
  // this is supposed to be called regularly during a marking step as
  // it checks a bunch of conditions that might cause the marking step
  // to abort
  void regular_clock_call();
  bool concurrent() { return _concurrent; }

 public:
  // It resets the task; it should be called right at the beginning of
  // a marking phase.
  void reset(CMBitMap* _nextMarkBitMap);
  // it clears all the fields that correspond to a claimed region.
  void clear_region_fields();

  void set_concurrent(bool concurrent) { _concurrent = concurrent; }

  // The main method of this class which performs a marking step
  // trying not to exceed the given duration. However, it might exit
  // prematurely, according to some conditions (i.e. SATB buffers are
  // available for processing).
  void do_marking_step(double target_ms, bool do_stealing, bool do_termination);

  // These two calls start and stop the timer
  void record_start_time() {
    _elapsed_time_ms = os::elapsedTime() * 1000.0;
  }
  void record_end_time() {
    _elapsed_time_ms = os::elapsedTime() * 1000.0 - _elapsed_time_ms;
  }

  // returns the task ID
  int task_id() { return _task_id; }
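
  // Illustrative driver loop (not part of the original header): roughly how
  // the concurrent phase repeatedly invokes a task's marking step.
  // `task`, `cm` and the 10ms target are placeholders.
  //
  //   task->record_start_time();
  //   do {
  //     task->do_marking_step(10.0 /* target_ms */,
  //                           true /* do_stealing */,
  //                           true /* do_termination */);
  //     // an aborted step returns early (yield, SATB buffers, time-out);
  //     // the cause is handled and the step is re-invoked
  //   } while (task->has_aborted() && !cm->has_aborted());
  //   task->record_end_time();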

  // From TerminatorTerminator. It determines whether this task should
  // exit the termination protocol after it's entered it.
  virtual bool should_exit_termination();

  // Resets the local region fields after a task has finished scanning a
  // region; or when they have become stale as a result of the region
  // being evacuated.
  void giveup_current_region();

  HeapWord* finger()            { return _finger; }

  bool has_aborted()            { return _has_aborted; }
  void set_has_aborted()        { _has_aborted = true; }
  void clear_has_aborted()      { _has_aborted = false; }
  bool has_timed_out()          { return _has_timed_out; }
  bool claimed()                { return _claimed; }

  void set_cm_oop_closure(G1CMOopClosure* cm_oop_closure);

  // It grays the object by marking it and, if necessary, pushing it
  // on the local queue
  inline void deal_with_reference(oop obj);

  // It scans an object and visits its children.
  void scan_object(oop obj);

  // It pushes an object on the local queue.
  inline void push(oop obj);

  // These two move entries to/from the global stack.
  void move_entries_to_global_stack();
  void get_entries_from_global_stack();

  // It pops and scans objects from the local queue. If partially is
  // true, then it stops when the queue size is of a given limit. If
  // partially is false, then it stops when the queue is empty.
  void drain_local_queue(bool partially);
  // It moves entries from the global stack to the local queue and
  // drains the local queue. If partially is true, then it stops when
  // both the global stack and the local queue reach a given size. If
  // partially is false, it tries to empty them totally.
  void drain_global_stack(bool partially);
  // It keeps picking SATB buffers and processing them until no SATB
  // buffers are available.
  void drain_satb_buffers();

  // moves the local finger to a new location
  inline void move_finger_to(HeapWord* new_finger) {
    assert(new_finger >= _finger && new_finger < _region_limit, "invariant");
    _finger = new_finger;
  }

  CMTask(int task_num, ConcurrentMark *cm,
         size_t* marked_bytes, BitMap* card_bm,
         CMTaskQueue* task_queue, CMTaskQueueSet* task_queues);

  // it prints statistics associated with this task
  void print_stats();

#if _MARKING_STATS_
  void increase_objs_found_on_bitmap() { ++_objs_found_on_bitmap; }
#endif // _MARKING_STATS_
};

// Class that's used to print out per-region liveness
// information. It's currently used at the end of marking and also
// after we sort the old regions at the end of the cleanup operation.
class G1PrintRegionLivenessInfoClosure: public HeapRegionClosure {
 private:
  outputStream* _out;

  // Accumulators for these values.
  size_t _total_used_bytes;
  size_t _total_capacity_bytes;
  size_t _total_prev_live_bytes;
  size_t _total_next_live_bytes;

  // These are set up when we come across a "starts humongous" region
  // (as this is where most of this information is stored, not in the
  // subsequent "continues humongous" regions). After that, for every
  // region in a given humongous region series we deduce the right
  // values for it by simply subtracting the appropriate amount from
  // these fields. All these values should reach 0 after we've visited
  // the last region in the series.
  size_t _hum_used_bytes;
  size_t _hum_capacity_bytes;
  size_t _hum_prev_live_bytes;
  size_t _hum_next_live_bytes;

  static double perc(size_t val, size_t total) {
    if (total == 0) {
      return 0.0;
    } else {
      return 100.0 * ((double) val / (double) total);
    }
  }

  static double bytes_to_mb(size_t val) {
    return (double) val / (double) M;
  }

  // See the .cpp file.
  size_t get_hum_bytes(size_t* hum_bytes);
  void get_hum_bytes(size_t* used_bytes, size_t* capacity_bytes,
                     size_t* prev_live_bytes, size_t* next_live_bytes);

 public:
  // The header and footer are printed in the constructor and
  // destructor respectively.
  G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name);
  virtual bool doHeapRegion(HeapRegion* r);
  ~G1PrintRegionLivenessInfoClosure();
};
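
// Illustrative use (not part of the original header): printing the
// per-region liveness table over the whole heap. `g1h` is a hypothetical
// G1CollectedHeap pointer; "Post-Marking" is just an example phase name.
//
//   G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
//   g1h->heap_region_iterate(&cl);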

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP