/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP
#define SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP

#include "classfile/javaClasses.hpp"
#include "gc/g1/g1ConcurrentMarkBitMap.hpp"
#include "gc/g1/g1ConcurrentMarkObjArrayProcessor.hpp"
#include "gc/g1/g1RegionToSpaceMapper.hpp"
#include "gc/g1/heapRegionSet.hpp"
#include "gc/shared/taskqueue.hpp"

class G1CollectedHeap;
class G1CMBitMap;
class G1CMTask;
class G1ConcurrentMark;
class ConcurrentGCTimer;
class G1OldTracer;
class G1SurvivorRegions;

#ifdef _MSC_VER
#pragma warning(push)
// warning C4522: multiple assignment operators specified
#pragma warning(disable:4522)
#endif

// This is a container class for either an oop or a continuation address for
// mark stack entries. Both are pushed onto the mark stack.
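//
// Illustrative sketch of the encoding (not actual code; obj and addr are
// placeholders): the low bit of _holder distinguishes the two cases, which
// works because both oops and array slice addresses are at least word aligned.
//
//   G1TaskQueueEntry e1 = G1TaskQueueEntry::from_oop(obj);    // ArraySliceBit clear
//   G1TaskQueueEntry e2 = G1TaskQueueEntry::from_slice(addr); // ArraySliceBit set
//   if (e2.is_array_slice()) {
//     HeapWord* slice = e2.slice();  // strips the tag bit
//   }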
class G1TaskQueueEntry VALUE_OBJ_CLASS_SPEC {
private:
  void* _holder;

  static const uintptr_t ArraySliceBit = 1;

  G1TaskQueueEntry(oop obj) : _holder(obj) {
    assert(_holder != NULL, "Not allowed to set NULL task queue element");
  }
  G1TaskQueueEntry(HeapWord* addr) : _holder((void*)((uintptr_t)addr | ArraySliceBit)) { }
public:
  G1TaskQueueEntry(const G1TaskQueueEntry& other) { _holder = other._holder; }
  G1TaskQueueEntry() : _holder(NULL) { }

  static G1TaskQueueEntry from_slice(HeapWord* what) { return G1TaskQueueEntry(what); }
  static G1TaskQueueEntry from_oop(oop obj) { return G1TaskQueueEntry(obj); }

  G1TaskQueueEntry& operator=(const G1TaskQueueEntry& t) {
    _holder = t._holder;
    return *this;
  }

  volatile G1TaskQueueEntry& operator=(const volatile G1TaskQueueEntry& t) volatile {
    _holder = t._holder;
    return *this;
  }

  oop obj() const {
    assert(!is_array_slice(), "Trying to read array slice " PTR_FORMAT " as oop", p2i(_holder));
    return (oop)_holder;
  }

  HeapWord* slice() const {
    assert(is_array_slice(), "Trying to read oop " PTR_FORMAT " as array slice", p2i(_holder));
    return (HeapWord*)((uintptr_t)_holder & ~ArraySliceBit);
  }

  bool is_oop() const { return !is_array_slice(); }
  bool is_array_slice() const { return ((uintptr_t)_holder & ArraySliceBit) != 0; }
  bool is_null() const { return _holder == NULL; }
};

#ifdef _MSC_VER
#pragma warning(pop)
#endif

typedef GenericTaskQueue<G1TaskQueueEntry, mtGC> G1CMTaskQueue;
typedef GenericTaskQueueSet<G1CMTaskQueue, mtGC> G1CMTaskQueueSet;

// Closure used by CM during concurrent reference discovery
// and reference processing (during remarking) to determine
// if a particular object is alive. It is primarily used
// to determine if referents of discovered reference objects
// are alive. An instance is also embedded into the
// reference processor as the _is_alive_non_header field.
class G1CMIsAliveClosure: public BoolObjectClosure {
  G1CollectedHeap* _g1;
public:
  G1CMIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) { }

  bool do_object_b(oop obj);
};

// Represents the overflow mark stack used by concurrent marking.
//
// Stores oops in a huge buffer in virtual memory that is always fully committed.
// Resizing may only happen during a STW pause when the stack is empty.
//
// Memory is allocated on a "chunk" basis, i.e. a set of oops. For this, the mark
// stack memory is split into evenly sized chunks of oops. Users can only
// add or remove entries on that basis.
// Chunks are filled in increasing address order. Chunks that are not completely
// filled are terminated with a NULL element.
//
// Every chunk has a header containing a single pointer element used for memory
// management. This wastes some space, but is negligible (< .1% with current sizing).
//
// Memory management is done using a mix of tracking a high water-mark indicating
// that all chunks at a lower address are valid chunks, and a singly linked free
// list connecting all empty chunks.
class G1CMMarkStack VALUE_OBJ_CLASS_SPEC {
public:
  // Number of TaskQueueEntries that can fit in a single chunk.
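  // Together with the one-word header holding the next pointer, a chunk
  // occupies exactly 1024 words, i.e. 8 KiB on a 64-bit VM (illustrative
  // arithmetic: (1023 + 1) * 8 bytes).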
  static const size_t EntriesPerChunk = 1024 - 1 /* One reference for the next pointer */;
private:
  struct TaskQueueEntryChunk {
    TaskQueueEntryChunk* next;
    G1TaskQueueEntry data[EntriesPerChunk];
  };

  size_t _max_chunk_capacity;    // Maximum number of TaskQueueEntryChunk elements on the stack.

  TaskQueueEntryChunk* _base;    // Bottom address of allocated memory area.
  size_t _chunk_capacity;        // Current maximum number of TaskQueueEntryChunk elements.

  char _pad0[DEFAULT_CACHE_LINE_SIZE];
  TaskQueueEntryChunk* volatile _free_list;  // Linked list of free chunks that can be allocated by users.
  char _pad1[DEFAULT_CACHE_LINE_SIZE - sizeof(TaskQueueEntryChunk*)];
  TaskQueueEntryChunk* volatile _chunk_list; // List of chunks currently containing data.
  volatile size_t _chunks_in_chunk_list;
  char _pad2[DEFAULT_CACHE_LINE_SIZE - sizeof(TaskQueueEntryChunk*) - sizeof(size_t)];

  volatile size_t _hwm;          // High water mark within the reserved space.
  char _pad4[DEFAULT_CACHE_LINE_SIZE - sizeof(size_t)];

  // Allocate a new chunk from the reserved memory, using the high water mark. Returns
  // NULL if out of memory.
  TaskQueueEntryChunk* allocate_new_chunk();

  // Atomically add the given chunk to the list.
  void add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem);
  // Atomically remove and return a chunk from the given list. Returns NULL if the
  // list is empty.
  TaskQueueEntryChunk* remove_chunk_from_list(TaskQueueEntryChunk* volatile* list);

  void add_chunk_to_chunk_list(TaskQueueEntryChunk* elem);
  void add_chunk_to_free_list(TaskQueueEntryChunk* elem);

  TaskQueueEntryChunk* remove_chunk_from_chunk_list();
  TaskQueueEntryChunk* remove_chunk_from_free_list();

  // Resizes the mark stack to the given new capacity. Releases any previous
  // memory if successful.
  bool resize(size_t new_capacity);

public:
  G1CMMarkStack();
  ~G1CMMarkStack();

  // Alignment and minimum capacity of this mark stack in number of oops.
  static size_t capacity_alignment();

  // Allocate and initialize the mark stack with the given number of oops.
  bool initialize(size_t initial_capacity, size_t max_capacity);

  // Pushes the given buffer containing at most EntriesPerChunk elements on the mark
  // stack. If fewer than EntriesPerChunk elements are to be pushed, the array must
  // be terminated with a NULL.
  // Returns whether the buffer contents were successfully pushed to the global mark
  // stack.
  bool par_push_chunk(G1TaskQueueEntry* buffer);

  // Pops a chunk from this mark stack, copying its entries into the given buffer. This
  // chunk may contain up to EntriesPerChunk elements. If there are fewer, the last
  // element in the array is a NULL pointer.
  bool par_pop_chunk(G1TaskQueueEntry* buffer);

  // Return whether the chunk list is empty. Racy due to unsynchronized access to
  // _chunk_list.
  bool is_empty() const { return _chunk_list == NULL; }

  size_t capacity() const { return _chunk_capacity; }

  // Expand the stack, typically in response to an overflow condition.
  void expand();

  // Return the approximate number of oops on this mark stack. Racy due to
  // unsynchronized access to _chunks_in_chunk_list.
  size_t size() const { return _chunks_in_chunk_list * EntriesPerChunk; }

  void set_empty();

  // Apply Fn to every oop on the mark stack. The mark stack must not
  // be modified while iterating.
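  //
  // Illustrative debug-only usage (sketch; the functor and stack names are
  // placeholders, not part of this interface):
  //
  //   struct CountOops {
  //     size_t _count;
  //     CountOops() : _count(0) { }
  //     void operator()(G1TaskQueueEntry entry) {
  //       if (entry.is_oop()) {
  //         _count++;
  //       }
  //     }
  //   };
  //   // mark_stack.iterate(CountOops());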
  template<typename Fn> void iterate(Fn fn) const PRODUCT_RETURN;
};

// Root Regions are regions that are not empty at the beginning of a
// marking cycle and which we might collect during an evacuation pause
// while the cycle is active. Given that, during evacuation pauses, we
// do not copy objects that are explicitly marked, what we have to do
// for the root regions is to scan them and mark all objects reachable
// from them. According to the SATB assumptions, we only need to visit
// each object once during marking. So, as long as we finish this scan
// before the next evacuation pause, we can copy the objects from the
// root regions without having to mark them or do anything else to them.
//
// Currently, we only support root region scanning once (at the start
// of the marking cycle) and the root regions are all the survivor
// regions populated during the initial-mark pause.
class G1CMRootRegions VALUE_OBJ_CLASS_SPEC {
private:
  const G1SurvivorRegions* _survivors;
  G1ConcurrentMark*        _cm;

  volatile bool            _scan_in_progress;
  volatile bool            _should_abort;
  volatile int             _claimed_survivor_index;

  void notify_scan_done();

public:
  G1CMRootRegions();
  // We actually do most of the initialization in this method.
  void init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm);

  // Reset the claiming / scanning of the root regions.
  void prepare_for_scan();

  // Forces get_next() to return NULL so that the iteration aborts early.
  void abort() { _should_abort = true; }

  // Return true if the CM threads are actively scanning root regions,
  // false otherwise.
  bool scan_in_progress() { return _scan_in_progress; }

  // Claim the next root region to scan atomically, or return NULL if
  // all have been claimed.
  HeapRegion* claim_next();

  // The number of root regions to scan.
  uint num_root_regions() const;

  void cancel_scan();

  // Flag that we're done with root region scanning and notify anyone
  // who's waiting on it. If aborted is false, assume that all regions
  // have been claimed.
  void scan_finished();

  // If CM threads are still scanning root regions, wait until they
  // are done. Return true if we had to wait, false otherwise.
  bool wait_until_scan_finished();
};
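
// Illustrative scanning loop over the root regions (sketch only;
// scan_one_root_region is a placeholder for the actual scanning code):
//
//   HeapRegion* hr;
//   while ((hr = root_regions->claim_next()) != NULL) {
//     scan_one_root_region(hr);  // mark everything reachable from hr
//   }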

class ConcurrentMarkThread;

class G1ConcurrentMark: public CHeapObj<mtGC> {
  friend class ConcurrentMarkThread;
  friend class G1ParNoteEndTask;
  friend class G1VerifyLiveDataClosure;
  friend class G1CMRefProcTaskProxy;
  friend class G1CMRefProcTaskExecutor;
  friend class G1CMKeepAliveAndDrainClosure;
  friend class G1CMDrainMarkingStackClosure;
  friend class G1CMBitMapClosure;
  friend class G1CMConcurrentMarkingTask;
  friend class G1CMRemarkTask;
  friend class G1CMTask;

protected:
  ConcurrentMarkThread* _cmThread;   // The thread doing the work
  G1CollectedHeap*      _g1h;        // The heap
  uint                  _parallel_marking_threads;     // The number of marking
                                                       // threads we're using
  uint                  _max_parallel_marking_threads; // Max number of marking
                                                       // threads we'll ever use
  double                _sleep_factor;          // How much we have to sleep, with
                                                // respect to the work we just did, to
                                                // meet the marking overhead goal
  double                _marking_task_overhead; // Marking target overhead for
                                                // a single task

  FreeRegionList        _cleanup_list;

  // Concurrent marking support structures
  G1CMBitMap            _markBitMap1;
  G1CMBitMap            _markBitMap2;
  G1CMBitMap*           _prevMarkBitMap; // Completed mark bitmap
  G1CMBitMap*           _nextMarkBitMap; // Under-construction mark bitmap

  // Heap bounds
  HeapWord*             _heap_start;
  HeapWord*             _heap_end;

  // Root region tracking and claiming
  G1CMRootRegions       _root_regions;

  // For grey objects
  G1CMMarkStack         _global_mark_stack; // Grey objects behind global finger
  HeapWord* volatile    _finger;            // The global finger, region aligned,
                                            // always points to the end of the
                                            // last claimed region

  // Marking tasks
  uint                    _max_worker_id; // Maximum worker id
  uint                    _active_tasks;  // Number of tasks currently active
  G1CMTask**              _tasks;         // Task queue array (max_worker_id length)
  G1CMTaskQueueSet*       _task_queues;   // Task queue set
  ParallelTaskTerminator  _terminator;    // For termination

  // Two sync barriers that are used to synchronize tasks when an
  // overflow occurs. The algorithm is the following. All tasks enter
  // the first one to ensure that they have all stopped manipulating
  // the global data structures. After they exit it, they re-initialize
  // their data structures and task 0 re-initializes the global data
  // structures. Then, they enter the second sync barrier. This
  // ensures that no task starts doing work before all data
  // structures (local and global) have been re-initialized. When they
  // exit it, they are free to start working again.
  WorkGangBarrierSync     _first_overflow_barrier_sync;
  WorkGangBarrierSync     _second_overflow_barrier_sync;

  // This is set by any task, when an overflow on the global data
  // structures is detected
  volatile bool           _has_overflown;
  // True: marking is concurrent, false: we're in remark
  volatile bool           _concurrent;
  // Set at the end of a Full GC so that marking aborts
  volatile bool           _has_aborted;

  // Used when remark aborts due to an overflow to indicate that
  // another concurrent marking phase should start
  volatile bool           _restart_for_overflow;

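  // Illustrative sketch of how the overflow machinery above is used
  // (not actual code; buffer and worker_id are placeholders):
  //
  //   if (!_global_mark_stack.par_push_chunk(buffer)) {
  //     set_has_overflown();                  // any task may set this
  //   }
  //   ...
  //   enter_first_sync_barrier(worker_id);    // all tasks stop touching global state
  //   // task 0 re-initializes the global data structures,
  //   // each task re-initializes its local data structures
  //   enter_second_sync_barrier(worker_id);   // nobody resumes before the reset
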
  // This is true from the very start of concurrent marking until the
  // point when all the tasks complete their work. It is really used
  // to determine the points between the end of concurrent marking and
  // time of remark.
  volatile bool           _concurrent_marking_in_progress;

  ConcurrentGCTimer*      _gc_timer_cm;

  G1OldTracer*            _gc_tracer_cm;

  // All of these times are in ms
  NumberSeq _init_times;
  NumberSeq _remark_times;
  NumberSeq _remark_mark_times;
  NumberSeq _remark_weak_ref_times;
  NumberSeq _cleanup_times;
  double    _total_counting_time;
  double    _total_rs_scrub_time;

  double*   _accum_task_vtime;   // Accumulated task vtime

  WorkGang* _parallel_workers;

  void weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes);
  void weakRefsWork(bool clear_all_soft_refs);

  void swapMarkBitMaps();

  // Resets the global marking data structures, as well as the
  // task local ones; should be called during initial mark.
  void reset();

  // Resets all the marking data structures. Called when we have to restart
  // marking or when marking completes (via set_non_marking_state below).
  void reset_marking_state();

  // We do this after we're done with marking so that the marking data
  // structures are initialized to a sensible and predictable state.
  void set_non_marking_state();

  // Called to indicate how many threads are currently active.
  void set_concurrency(uint active_tasks);

  // Should be called to indicate which phase we're in (concurrent
  // mark or remark) and how many threads are currently active.
  void set_concurrency_and_phase(uint active_tasks, bool concurrent);

  // Prints all gathered CM-related statistics.
  void print_stats();

  bool cleanup_list_is_empty() {
    return _cleanup_list.is_empty();
  }

  // Accessor methods
  uint parallel_marking_threads() const     { return _parallel_marking_threads; }
  uint max_parallel_marking_threads() const { return _max_parallel_marking_threads; }
  double sleep_factor()                     { return _sleep_factor; }
  double marking_task_overhead()            { return _marking_task_overhead; }

  HeapWord*               finger()       { return _finger; }
  bool                    concurrent()   { return _concurrent; }
  uint                    active_tasks() { return _active_tasks; }
  ParallelTaskTerminator* terminator()   { return &_terminator; }

  // Claims the next available region to be scanned by a marking
  // task/thread. It might return NULL if the next region is empty or
  // we have run out of regions. In the latter case, out_of_regions()
  // determines whether we've really run out of regions or the task
  // should call claim_region() again. This might seem a bit
  // awkward. Originally, the code was written so that claim_region()
  // either successfully returned with a non-empty region or there
  // were no more regions to be claimed. The problem with this was
  // that, in certain circumstances, it iterated over large chunks of
  // the heap finding only empty regions and, while it was working, it
  // was preventing the calling task from calling its regular clock
  // method. So, this way, each task will spend very little time in
  // claim_region() and is allowed to call the regular clock method
  // frequently.
  HeapRegion* claim_region(uint worker_id);

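  // Illustrative caller pattern for claim_region() (sketch only, not
  // actual code):
  //
  //   HeapRegion* hr = claim_region(worker_id);
  //   while (hr != NULL) {
  //     // ... scan hr, letting the regular clock method run in between ...
  //     hr = claim_region(worker_id);
  //   }
  //   // hr == NULL: consult out_of_regions() to decide whether to retry.
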
  // Determines whether we've run out of regions to scan. Note that
  // the finger can point past the heap end in case the heap was expanded
  // to satisfy an allocation without doing a GC. This is fine, because all
  // objects in those regions will be considered live anyway because of
  // SATB guarantees (i.e. their TAMS will be equal to bottom).
  bool out_of_regions() { return _finger >= _heap_end; }

  // Returns the task with the given id
  G1CMTask* task(int id) {
    assert(0 <= id && id < (int) _active_tasks,
           "task id not within active bounds");
    return _tasks[id];
  }

  // Returns the task queue with the given id
  G1CMTaskQueue* task_queue(int id) {
    assert(0 <= id && id < (int) _active_tasks,
           "task queue id not within active bounds");
    return (G1CMTaskQueue*) _task_queues->queue(id);
  }

  // Returns the task queue set
  G1CMTaskQueueSet* task_queues() { return _task_queues; }

  // Access / manipulation of the overflow flag which is set to
  // indicate that the global stack has overflown
  bool has_overflown()       { return _has_overflown; }
  void set_has_overflown()   { _has_overflown = true; }
  void clear_has_overflown() { _has_overflown = false; }
  bool restart_for_overflow() { return _restart_for_overflow; }

  // Methods to enter the two overflow sync barriers
  void enter_first_sync_barrier(uint worker_id);
  void enter_second_sync_barrier(uint worker_id);

  // Card index of the bottom of the G1 heap. Used for biasing indices into
  // the card bitmaps.
  intptr_t _heap_bottom_card_num;

  // Set to true when initialization is complete
  bool _completed_initialization;

  // end_timer, true to end gc timer after ending concurrent phase.
  void register_concurrent_phase_end_common(bool end_timer);

  // Clear the given bitmap in parallel using the given WorkGang. If may_yield is
  // true, periodically insert checks to see if this method should exit prematurely.
  void clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield);
public:
  // Manipulation of the global mark stack.
  // The push and pop operations are used by tasks for transfers
  // between task-local queues and the global mark stack.
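  // Transfers operate on whole chunks: the buffer passed to these methods
  // holds up to G1CMMarkStack::EntriesPerChunk entries and is NULL-terminated
  // when not completely full (restating the G1CMMarkStack contract above).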
  bool mark_stack_push(G1TaskQueueEntry* arr) {
    if (!_global_mark_stack.par_push_chunk(arr)) {
      set_has_overflown();
      return false;
    }
    return true;
  }
  bool mark_stack_pop(G1TaskQueueEntry* arr) {
    return _global_mark_stack.par_pop_chunk(arr);
  }
  size_t mark_stack_size()                { return _global_mark_stack.size(); }
  size_t partial_mark_stack_size_target() { return _global_mark_stack.capacity() / 3; }
  bool mark_stack_empty()                 { return _global_mark_stack.is_empty(); }

  G1CMRootRegions* root_regions() { return &_root_regions; }

  bool concurrent_marking_in_progress() {
    return _concurrent_marking_in_progress;
  }
  void set_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = true;
  }
  void clear_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = false;
  }

  void concurrent_cycle_start();
  void concurrent_cycle_end();

  void update_accum_task_vtime(int i, double vtime) {
    _accum_task_vtime[i] += vtime;
  }

  double all_task_accum_vtime() {
    double ret = 0.0;
    for (uint i = 0; i < _max_worker_id; ++i)
      ret += _accum_task_vtime[i];
    return ret;
  }

  // Attempts to steal an object from the task queues of other tasks
  bool try_stealing(uint worker_id, int* hash_seed, G1TaskQueueEntry& task_entry);

  G1ConcurrentMark(G1CollectedHeap* g1h,
                   G1RegionToSpaceMapper* prev_bitmap_storage,
                   G1RegionToSpaceMapper* next_bitmap_storage);
  ~G1ConcurrentMark();

  ConcurrentMarkThread* cmThread() { return _cmThread; }

  G1CMBitMap* prevMarkBitMap() const { return _prevMarkBitMap; }
  G1CMBitMap* nextMarkBitMap() const { return _nextMarkBitMap; }

  // Returns the number of GC threads to be used in a concurrent
  // phase based on the number of GC threads being used in a STW
  // phase.
  uint scale_parallel_threads(uint n_par_threads);

  // Calculates the number of GC threads to be used in a concurrent phase.
  uint calc_parallel_marking_threads();

  // The following are interactions between CM and G1CollectedHeap.

  // Notifies CM that an object needs to be grayed as a root during
  // initial-mark. It is MT-safe. hr is the region that contains the
  // object and is passed optionally by callers who might already have
  // it (no point in recalculating it).
  inline void grayRoot(oop obj,
                       HeapRegion* hr = NULL);

  // Prepare internal data structures for the next mark cycle. This includes clearing
  // the next mark bitmap and some internal data structures. This method is intended
  // to be called concurrently to the mutator. It will yield to safepoint requests.
  void cleanup_for_next_mark();

  // Clear the previous marking bitmap during safepoint.
  void clear_prev_bitmap(WorkGang* workers);

  // Return whether the next mark bitmap has no marks set. To be used for assertions
  // only. Will not yield to pause requests.
  bool nextMarkBitmapIsClear();

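  // Illustrative lifecycle of the bitmap pair above (sketch, not actual
  // code): once marking completes, swapMarkBitMaps() turns the just-built
  // "next" bitmap into the "prev" bitmap, and cleanup_for_next_mark() then
  // clears the new "next" bitmap concurrently for the following cycle.
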
  // These two do the work that needs to be done before and after the
  // initial root checkpoint. Since this checkpoint can be done at two
  // different points (i.e. an explicit pause or piggy-backed on a
  // young collection), it's nice to be able to easily share the
  // pre/post code. It might be the case that we can put everything in
  // the post method.
  void checkpointRootsInitialPre();
  void checkpointRootsInitialPost();

  // Scan all the root regions and mark everything reachable from
  // them.
  void scan_root_regions();

  // Scan a single root region and mark everything reachable from it.
  void scanRootRegion(HeapRegion* hr);

  // Do concurrent phase of marking, to a tentative transitive closure.
  void mark_from_roots();

  void checkpointRootsFinal(bool clear_all_soft_refs);
  void checkpointRootsFinalWork();
  void cleanup();
  void complete_cleanup();

  // Mark in the previous bitmap. NB: this is usually read-only, so use
  // this carefully!
  inline void markPrev(oop p);

  // Clears marks for all objects in the given range in the previous
  // bitmap. NB: the previous bitmap is usually
  // read-only, so use this carefully!
  void clearRangePrevBitmap(MemRegion mr);

  // Verify that there are no CSet oops on the stacks (taskqueues /
  // global mark stack) and fingers (global / per-task).
  // If marking is not in progress, it's a no-op.
  void verify_no_cset_oops() PRODUCT_RETURN;

  inline bool isPrevMarked(oop p) const;

  inline bool do_yield_check();

  // Abandon current marking iteration due to a Full GC.
  void abort();

  bool has_aborted() { return _has_aborted; }

  void print_summary_info();

  void print_worker_threads_on(outputStream* st) const;
  void threads_do(ThreadClosure* tc) const;

  void print_on_error(outputStream* st) const;

  // Attempts to mark the given object on the next mark bitmap.
  inline bool par_mark(oop obj);

  // Returns true if initialization was successfully completed.
  bool completed_initialization() const {
    return _completed_initialization;
  }

  ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; }
  G1OldTracer* gc_tracer_cm() const { return _gc_tracer_cm; }

private:
  // Clear (Reset) all liveness count data.
  void clear_live_data(WorkGang* workers);

#ifdef ASSERT
  // Verify that all of the above liveness count data structures are in
  // their initial state.
  void verify_live_data_clear();
#endif

  // Aggregates the per-card liveness data based on the current marking. Also sets
  // the amount of marked bytes for each region.
  void create_live_data();

  void finalize_live_data();

  void verify_live_data();
};

// A class representing a marking task.
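//
// Illustrative sketch of how a worker drives a task (not actual code; task,
// cm, the time target, and the loop structure are placeholders):
//
//   G1CMTask* task = ...;  // this worker's task
//   task->record_start_time();
//   do {
//     task->do_marking_step(10.0 /* target_ms */,
//                           true /* do_termination */,
//                           false /* is_serial */);
//   } while (task->has_aborted() && !cm->has_aborted());
//   task->record_end_time();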
class G1CMTask : public TerminatorTerminator {
private:
  enum PrivateConstants {
    // The regular clock call is called once the number of scanned words
    // reaches this limit
    words_scanned_period = 12*1024,
    // The regular clock call is called once the number of visited
    // references reaches this limit
    refs_reached_period = 1024,
    // Initial value for the hash seed, used in the work stealing code
    init_hash_seed = 17
  };

  G1CMObjArrayProcessor       _objArray_processor;

  uint                        _worker_id;
  G1CollectedHeap*            _g1h;
  G1ConcurrentMark*           _cm;
  G1CMBitMap*                 _nextMarkBitMap;
  // the task queue of this task
  G1CMTaskQueue*              _task_queue;
private:
  // the task queue set---needed for stealing
  G1CMTaskQueueSet*           _task_queues;
  // indicates whether the task has been claimed---this is only for
  // debugging purposes
  bool                        _claimed;

  // number of calls to this task
  int                         _calls;

  // when the virtual timer reaches this time, the marking step should
  // exit
  double                      _time_target_ms;
  // the start time of the current marking step
  double                      _start_time_ms;

  // the oop closure used for iterations over oops
  G1CMOopClosure*             _cm_oop_closure;

  // the region this task is scanning, NULL if we're not scanning any
  HeapRegion*                 _curr_region;
  // the local finger of this task, NULL if we're not scanning a region
  HeapWord*                   _finger;
  // limit of the region this task is scanning, NULL if we're not scanning one
  HeapWord*                   _region_limit;

  // the number of words this task has scanned
  size_t                      _words_scanned;
  // When _words_scanned reaches this limit, the regular clock is
  // called. Notice that this might be decreased under certain
  // circumstances (i.e. when we believe that we did an expensive
  // operation).
  size_t                      _words_scanned_limit;
  // the initial value of _words_scanned_limit (i.e. what it was
  // before it was decreased).
  size_t                      _real_words_scanned_limit;

  // the number of references this task has visited
  size_t                      _refs_reached;
  // When _refs_reached reaches this limit, the regular clock is
  // called. Notice that this might be decreased under certain
  // circumstances (i.e. when we believe that we did an expensive
  // operation).
  size_t                      _refs_reached_limit;
  // the initial value of _refs_reached_limit (i.e. what it was before
  // it was decreased).
  size_t                      _real_refs_reached_limit;

  // used by the work stealing code
  int                         _hash_seed;
  // if this is true, then the task has aborted for some reason
  bool                        _has_aborted;
  // set when the task aborts because it has met its time quota
  bool                        _has_timed_out;
  // true when we're draining SATB buffers; this avoids the task
  // aborting due to SATB buffers being available (as we're already
  // dealing with them)
  bool                        _draining_satb_buffers;

  // number sequence of past step times
  NumberSeq                   _step_times_ms;
  // elapsed time of this task
  double                      _elapsed_time_ms;
  // termination time of this task
  double                      _termination_time_ms;
  // when this task got into the termination protocol
  double                      _termination_start_time_ms;

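  // Illustrative relationship between the counters and limits above (sketch):
  // _words_scanned and _refs_reached are incremented during scanning;
  // check_limits() (below) compares them against _words_scanned_limit and
  // _refs_reached_limit, and once either limit is reached regular_clock_call()
  // decides whether this marking step must abort (e.g. time quota reached,
  // overflow, or a pending safepoint).
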
  // true when the task is during a concurrent phase, false when it is
  // in the remark phase (so, in the latter case, we do not have to
  // check all the things that we have to check during the concurrent
  // phase, i.e. SATB buffer availability...)
  bool                        _concurrent;

  TruncatedSeq                _marking_step_diffs_ms;

  // Updates the local fields after this task has claimed
  // a new region to scan
  void setup_for_region(HeapRegion* hr);
  // Brings the limit of the region up-to-date
  void update_region_limit();

  // Called when either the words scanned or the refs visited limit
  // has been reached
  void reached_limit();
  // Recalculates the words scanned and refs visited limits
  void recalculate_limits();
  // Decreases the words scanned and refs visited limits when we reach
  // an expensive operation
  void decrease_limits();
  // Checks whether the words scanned or refs visited reached their
  // respective limit and calls reached_limit() if they have
  void check_limits() {
    if (_words_scanned >= _words_scanned_limit ||
        _refs_reached >= _refs_reached_limit) {
      reached_limit();
    }
  }
  // Supposed to be called regularly during a marking step as
  // it checks a bunch of conditions that might cause the marking step
  // to abort
  void regular_clock_call();
  bool concurrent() { return _concurrent; }

  // Test whether obj might have already been passed over by the
  // mark bitmap scan, and so needs to be pushed onto the mark stack.
  bool is_below_finger(oop obj, HeapWord* global_finger) const;

  template<bool scan> void process_grey_task_entry(G1TaskQueueEntry task_entry);
public:
  // Apply the closure on the given area of the objArray. Return the number of words
  // scanned.
  inline size_t scan_objArray(objArrayOop obj, MemRegion mr);
  // Resets the task; should be called right at the beginning of
  // a marking phase.
  void reset(G1CMBitMap* _nextMarkBitMap);
  // Clears all the fields that correspond to a claimed region.
  void clear_region_fields();

  void set_concurrent(bool concurrent) { _concurrent = concurrent; }

  // The main method of this class which performs a marking step
  // trying not to exceed the given duration. However, it might exit
  // prematurely, according to some conditions (i.e. SATB buffers are
  // available for processing).
  void do_marking_step(double target_ms,
                       bool do_termination,
                       bool is_serial);

  // These two calls start and stop the timer
  void record_start_time() {
    _elapsed_time_ms = os::elapsedTime() * 1000.0;
  }
  void record_end_time() {
    _elapsed_time_ms = os::elapsedTime() * 1000.0 - _elapsed_time_ms;
  }

  // Returns the worker ID associated with this task.
  uint worker_id() { return _worker_id; }

  // From TerminatorTerminator. It determines whether this task should
  // exit the termination protocol after it's entered it.
  virtual bool should_exit_termination();

  // Resets the local region fields after a task has finished scanning a
  // region; or when they have become stale as a result of the region
  // being evacuated.
  void giveup_current_region();

  HeapWord* finger() { return _finger; }

  bool has_aborted()       { return _has_aborted; }
  void set_has_aborted()   { _has_aborted = true; }
  void clear_has_aborted() { _has_aborted = false; }
  bool has_timed_out()     { return _has_timed_out; }
  bool claimed()           { return _claimed; }

  void set_cm_oop_closure(G1CMOopClosure* cm_oop_closure);

  // Increment the number of references this task has visited.
  void increment_refs_reached() { ++_refs_reached; }

  // Grey the object by marking it. If not already marked, push it on
  // the local queue if below the finger.
  // obj is below its region's NTAMS.
  inline void make_reference_grey(oop obj);

  // Grey the object (by calling make_reference_grey) if required,
  // e.g. obj is below its containing region's NTAMS.
  // Precondition: obj is a valid heap object.
  inline void deal_with_reference(oop obj);

  // Scans an object and visits its children.
  inline void scan_task_entry(G1TaskQueueEntry task_entry);

  // Pushes an object on the local queue.
  inline void push(G1TaskQueueEntry task_entry);

  // Move entries to the global stack.
  void move_entries_to_global_stack();
  // Move entries from the global stack, return true if this was successful.
  bool get_entries_from_global_stack();

  // Pops and scans objects from the local queue. If partially is
  // true, then it stops when the queue size drops below a given limit. If
  // partially is false, then it stops when the queue is empty.
  void drain_local_queue(bool partially);
  // Moves entries from the global stack to the local queue and
  // drains the local queue. If partially is true, then it stops when
  // both the global stack and the local queue reach a given size. If
  // partially is false, it tries to empty them totally.
  void drain_global_stack(bool partially);
  // Keeps picking SATB buffers and processing them until no SATB
  // buffers are available.
  void drain_satb_buffers();

  // Moves the local finger to a new location
  inline void move_finger_to(HeapWord* new_finger) {
    assert(new_finger >= _finger && new_finger < _region_limit, "invariant");
    _finger = new_finger;
  }

  G1CMTask(uint worker_id,
           G1ConcurrentMark *cm,
           G1CMTaskQueue* task_queue,
           G1CMTaskQueueSet* task_queues);

  // Prints statistics associated with this task.
  void print_stats();
};

// Class that's used to print out per-region liveness
// information. It's currently used at the end of marking and also
// after we sort the old regions at the end of the cleanup operation.
class G1PrintRegionLivenessInfoClosure: public HeapRegionClosure {
private:
  // Accumulators for these values.
  size_t _total_used_bytes;
  size_t _total_capacity_bytes;
  size_t _total_prev_live_bytes;
  size_t _total_next_live_bytes;

  // Accumulator for the remembered set size
  size_t _total_remset_bytes;

  // Accumulator for strong code roots memory size
  size_t _total_strong_code_roots_bytes;

  static double perc(size_t val, size_t total) {
    if (total == 0) {
      return 0.0;
    } else {
      return 100.0 * ((double) val / (double) total);
    }
  }

  static double bytes_to_mb(size_t val) {
    return (double) val / (double) M;
  }

public:
  // The header and footer are printed in the constructor and
  // destructor respectively.
  G1PrintRegionLivenessInfoClosure(const char* phase_name);
  virtual bool doHeapRegion(HeapRegion* r);
  ~G1PrintRegionLivenessInfoClosure();
};

#endif // SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP