1 /*
2 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/classLoaderDataGraph.hpp"
27 #include "code/codeCache.hpp"
28 #include "gc/g1/g1BarrierSet.hpp"
29 #include "gc/g1/g1CollectedHeap.inline.hpp"
30 #include "gc/g1/g1CollectorState.hpp"
31 #include "gc/g1/g1ConcurrentMark.inline.hpp"
32 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
33 #include "gc/g1/g1HeapVerifier.hpp"
34 #include "gc/g1/g1OopClosures.inline.hpp"
35 #include "gc/g1/g1Policy.hpp"
36 #include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
37 #include "gc/g1/g1StringDedup.hpp"
38 #include "gc/g1/g1ThreadLocalData.hpp"
39 #include "gc/g1/heapRegion.inline.hpp"
40 #include "gc/g1/heapRegionRemSet.hpp"
41 #include "gc/g1/heapRegionSet.inline.hpp"
42 #include "gc/shared/adaptiveSizePolicy.hpp"
43 #include "gc/shared/gcId.hpp"
44 #include "gc/shared/gcTimer.hpp"
45 #include "gc/shared/gcTrace.hpp"
46 #include "gc/shared/gcTraceTime.inline.hpp"
47 #include "gc/shared/genOopClosures.inline.hpp"
48 #include "gc/shared/referencePolicy.hpp"
49 #include "gc/shared/strongRootsScope.hpp"
50 #include "gc/shared/suspendibleThreadSet.hpp"
51 #include "gc/shared/taskqueue.inline.hpp"
52 #include "gc/shared/vmGCOperations.hpp"
53 #include "gc/shared/weakProcessor.inline.hpp"
54 #include "include/jvm.h"
55 #include "logging/log.hpp"
56 #include "memory/allocation.hpp"
57 #include "memory/resourceArea.hpp"
58 #include "oops/access.inline.hpp"
59 #include "oops/oop.inline.hpp"
60 #include "runtime/atomic.hpp"
61 #include "runtime/handles.inline.hpp"
62 #include "runtime/java.hpp"
63 #include "runtime/prefetch.inline.hpp"
64 #include "services/memTracker.hpp"
65 #include "utilities/align.hpp"
66 #include "utilities/growableArray.hpp"
67
68 bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
69 assert(addr < _cm->finger(), "invariant");
70 assert(addr >= _task->finger(), "invariant");
71
72 // We move that task's local finger along.
73 _task->move_finger_to(addr);
74
75 _task->scan_task_entry(G1TaskQueueEntry::from_oop(oop(addr)));
76 // we only partially drain the local queue and global stack
77 _task->drain_local_queue(true);
78 _task->drain_global_stack(true);
79
80 // if the has_aborted flag has been raised, we need to bail out of
81 // the iteration
82 return !_task->has_aborted();
83 }
84
85 G1CMMarkStack::G1CMMarkStack() :
86 _max_chunk_capacity(0),
87 _base(NULL),
88 _chunk_capacity(0) {
89 set_empty();
90 }
91
92 bool G1CMMarkStack::resize(size_t new_capacity) {
93 assert(is_empty(), "Only resize when stack is empty.");
94 assert(new_capacity <= _max_chunk_capacity,
95 "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);
96
97 TaskQueueEntryChunk* new_base = MmapArrayAllocator<TaskQueueEntryChunk>::allocate_or_null(new_capacity, mtGC);
98
99 if (new_base == NULL) {
100 log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(TaskQueueEntryChunk));
101 return false;
102 }
103 // Release old mapping.
104 if (_base != NULL) {
105 MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
106 }
107
108 _base = new_base;
109 _chunk_capacity = new_capacity;
110 set_empty();
111
112 return true;
113 }
114
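// The capacity alignment, expressed in G1TaskQueueEntry units. As a purely
// illustrative example (actual values depend on the platform and on the
// chunk layout): with a 4 KiB vm_allocation_granularity, 1 KiB chunks and
// 8-byte entries, this would be lcm(4096, 1024) / 8 = 4096 / 8 = 512 entries.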
115 size_t G1CMMarkStack::capacity_alignment() {
116 return (size_t)lcm(os::vm_allocation_granularity(), sizeof(TaskQueueEntryChunk)) / sizeof(G1TaskQueueEntry);
117 }
118
119 bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
120 guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");
121
122 size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);
123
124 _max_chunk_capacity = align_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
125 size_t initial_chunk_capacity = align_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
126
127 guarantee(initial_chunk_capacity <= _max_chunk_capacity,
128 "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
129 _max_chunk_capacity,
130 initial_chunk_capacity);
131
132 log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
133 initial_chunk_capacity, _max_chunk_capacity);
134
135 return resize(initial_chunk_capacity);
136 }
137
138 void G1CMMarkStack::expand() {
139 if (_chunk_capacity == _max_chunk_capacity) {
140 log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
141 return;
142 }
143 size_t old_capacity = _chunk_capacity;
144 // Double capacity if possible
145 size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);
146
147 if (resize(new_capacity)) {
148 log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
149 old_capacity, new_capacity);
150 } else {
151 log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
152 old_capacity, new_capacity);
153 }
154 }
155
156 G1CMMarkStack::~G1CMMarkStack() {
157 if (_base != NULL) {
158 MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
159 }
160 }
161
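// Pushes elem at the head of *list. The update is not atomic, so callers are
// expected to serialize access, e.g. via the MarkStackChunkList_lock and
// MarkStackFreeList_lock taken in the wrappers below.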
162 void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
163 elem->next = *list;
164 *list = elem;
165 }
166
167 void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
168 MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
169 add_chunk_to_list(&_chunk_list, elem);
170 _chunks_in_chunk_list++;
171 }
172
173 void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
174 MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
175 add_chunk_to_list(&_free_list, elem);
176 }
177
178 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
179 TaskQueueEntryChunk* result = *list;
180 if (result != NULL) {
181 *list = (*list)->next;
182 }
183 return result;
184 }
185
186 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
187 MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
188 TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list);
189 if (result != NULL) {
190 _chunks_in_chunk_list--;
191 }
192 return result;
193 }
194
195 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_free_list() {
196 MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
197 return remove_chunk_from_list(&_free_list);
198 }
199
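// Bump-allocates a chunk directly from the backing array, using _hwm as a
// high-water mark. Returns NULL once the currently reserved backing memory
// is exhausted.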
200 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
201 // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
202 // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
203 // wraparound of _hwm.
204 if (_hwm >= _chunk_capacity) {
205 return NULL;
206 }
207
208 size_t cur_idx = Atomic::add(1u, &_hwm) - 1;
209 if (cur_idx >= _chunk_capacity) {
210 return NULL;
211 }
212
213 TaskQueueEntryChunk* result = ::new (&_base[cur_idx]) TaskQueueEntryChunk;
214 result->next = NULL;
215 return result;
216 }
217
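// Copies EntriesPerChunk entries from ptr_arr into a chunk taken from the
// free list (or newly allocated from backing memory) and publishes it on the
// chunk list. Returns false if no chunk could be obtained.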
218 bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) {
219 // Get a new chunk.
220 TaskQueueEntryChunk* new_chunk = remove_chunk_from_free_list();
221
222 if (new_chunk == NULL) {
223 // Did not get a chunk from the free list. Allocate from backing memory.
224 new_chunk = allocate_new_chunk();
225
226 if (new_chunk == NULL) {
227 return false;
228 }
229 }
230
231 Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, EntriesPerChunk * sizeof(G1TaskQueueEntry));
232
233 add_chunk_to_chunk_list(new_chunk);
234
235 return true;
236 }
237
238 bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
239 TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list();
240
241 if (cur == NULL) {
242 return false;
243 }
244
245 Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry));
246
247 add_chunk_to_free_list(cur);
248 return true;
249 }
250
251 void G1CMMarkStack::set_empty() {
252 _chunks_in_chunk_list = 0;
253 _hwm = 0;
254 _chunk_list = NULL;
255 _free_list = NULL;
256 }
257
258 G1CMRootRegions::G1CMRootRegions() :
259 _survivors(NULL), _cm(NULL), _scan_in_progress(false),
260 _should_abort(false), _claimed_survivor_index(0) { }
261
262 void G1CMRootRegions::init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm) {
263 _survivors = survivors;
264 _cm = cm;
265 }
266
267 void G1CMRootRegions::prepare_for_scan() {
268 assert(!scan_in_progress(), "pre-condition");
269
270 // Currently, only survivors can be root regions.
271 _claimed_survivor_index = 0;
272 _scan_in_progress = _survivors->regions()->is_nonempty();
273 _should_abort = false;
274 }
275
276 HeapRegion* G1CMRootRegions::claim_next() {
277 if (_should_abort) {
278 // If someone has set the should_abort flag, we return NULL to
279 // force the caller to bail out of their loop.
280 return NULL;
281 }
282
283 // Currently, only survivors can be root regions.
284 const GrowableArray<HeapRegion*>* survivor_regions = _survivors->regions();
285
286 int claimed_index = Atomic::add(1, &_claimed_survivor_index) - 1;
287 if (claimed_index < survivor_regions->length()) {
288 return survivor_regions->at(claimed_index);
289 }
290 return NULL;
291 }
292
293 uint G1CMRootRegions::num_root_regions() const {
294 return (uint)_survivors->regions()->length();
295 }
296
297 void G1CMRootRegions::notify_scan_done() {
298 MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
299 _scan_in_progress = false;
300 RootRegionScan_lock->notify_all();
301 }
302
303 void G1CMRootRegions::cancel_scan() {
304 notify_scan_done();
305 }
306
307 void G1CMRootRegions::scan_finished() {
308 assert(scan_in_progress(), "pre-condition");
309
310 // Currently, only survivors can be root regions.
311 if (!_should_abort) {
312 assert(_claimed_survivor_index >= 0, "otherwise comparison is invalid: %d", _claimed_survivor_index);
313 assert((uint)_claimed_survivor_index >= _survivors->length(),
314 "we should have claimed all survivors, claimed index = %u, length = %u",
315 (uint)_claimed_survivor_index, _survivors->length());
316 }
317
318 notify_scan_done();
319 }
320
321 bool G1CMRootRegions::wait_until_scan_finished() {
322 if (!scan_in_progress()) {
323 return false;
324 }
325
326 {
327 MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
328 while (scan_in_progress()) {
329 RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
330 }
331 }
332 return true;
333 }
334
335 // Returns the maximum number of workers to be used in a concurrent
336 // phase based on the number of GC workers being used in a STW
337 // phase.
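// For example, 8 STW GC workers yield (8 + 2) / 4 = 2 concurrent workers;
// the result is never less than one.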
338 static uint scale_concurrent_worker_threads(uint num_gc_workers) {
339 return MAX2((num_gc_workers + 2) / 4, 1U);
340 }
341
342 G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
343 G1RegionToSpaceMapper* prev_bitmap_storage,
344 G1RegionToSpaceMapper* next_bitmap_storage) :
345 // _cm_thread set inside the constructor
346 _g1h(g1h),
347 _completed_initialization(false),
348
349 _mark_bitmap_1(),
350 _mark_bitmap_2(),
351 _prev_mark_bitmap(&_mark_bitmap_1),
352 _next_mark_bitmap(&_mark_bitmap_2),
353
354 _heap(_g1h->reserved_region()),
355
356 _root_regions(),
357
358 _global_mark_stack(),
359
360 // _finger set in set_non_marking_state
361
362 _worker_id_offset(DirtyCardQueueSet::num_par_ids() + G1ConcRefinementThreads),
363 _max_num_tasks(ParallelGCThreads),
364 // _num_active_tasks set in set_non_marking_state()
365 // _tasks set inside the constructor
366
367 _task_queues(new G1CMTaskQueueSet((int) _max_num_tasks)),
368 _terminator(ParallelTaskTerminator((int) _max_num_tasks, _task_queues)),
369
370 _first_overflow_barrier_sync(),
371 _second_overflow_barrier_sync(),
372
373 _has_overflown(false),
374 _concurrent(false),
375 _has_aborted(false),
376 _restart_for_overflow(false),
377 _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
378 _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),
379
380 // _verbose_level set below
381
382 _init_times(),
383 _remark_times(),
384 _remark_mark_times(),
385 _remark_weak_ref_times(),
386 _cleanup_times(),
387 _total_cleanup_time(0.0),
388
389 _accum_task_vtime(NULL),
390
391 _concurrent_workers(NULL),
392 _num_concurrent_workers(0),
393 _max_concurrent_workers(0),
394
395 _region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_regions(), mtGC)),
396 _top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_regions(), mtGC))
397 {
398 _mark_bitmap_1.initialize(g1h->reserved_region(), prev_bitmap_storage);
399 _mark_bitmap_2.initialize(g1h->reserved_region(), next_bitmap_storage);
400
401 // Create & start ConcurrentMark thread.
402 _cm_thread = new G1ConcurrentMarkThread(this);
403 if (_cm_thread->osthread() == NULL) {
404 vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
405 }
406
407 assert(CGC_lock != NULL, "CGC_lock must be initialized");
408
409 SATBMarkQueueSet& satb_qs = G1BarrierSet::satb_mark_queue_set();
410 satb_qs.set_buffer_size(G1SATBBufferSize);
411
412 _root_regions.init(_g1h->survivor(), this);
413
414 if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
415 // Calculate the number of concurrent worker threads by scaling
416 // the number of parallel GC threads.
417 uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads);
418 FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
419 }
420
421 assert(ConcGCThreads > 0, "ConcGCThreads have been set.");
422 if (ConcGCThreads > ParallelGCThreads) {
423 log_warning(gc)("More ConcGCThreads (%u) than ParallelGCThreads (%u).",
424 ConcGCThreads, ParallelGCThreads);
425 return;
426 }
427
428 log_debug(gc)("ConcGCThreads: %u offset %u", ConcGCThreads, _worker_id_offset);
429 log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);
430
431 _num_concurrent_workers = ConcGCThreads;
432 _max_concurrent_workers = _num_concurrent_workers;
433
434 _concurrent_workers = new WorkGang("G1 Conc", _max_concurrent_workers, false, true);
435 _concurrent_workers->initialize_workers();
436
437 if (FLAG_IS_DEFAULT(MarkStackSize)) {
438 size_t mark_stack_size =
439 MIN2(MarkStackSizeMax,
440 MAX2(MarkStackSize, (size_t) (_max_concurrent_workers * TASKQUEUE_SIZE)));
441 // Verify that the calculated value for MarkStackSize is in range.
442 // It would be nice to use the private utility routine from Arguments.
443 if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
444 log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
445 "must be between 1 and " SIZE_FORMAT,
446 mark_stack_size, MarkStackSizeMax);
447 return;
448 }
449 FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
450 } else {
451 // Verify MarkStackSize is in range.
452 if (FLAG_IS_CMDLINE(MarkStackSize)) {
453 if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
454 if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
455 log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
456 "must be between 1 and " SIZE_FORMAT,
457 MarkStackSize, MarkStackSizeMax);
458 return;
459 }
460 } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
461 if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
462 log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
463 " or for MarkStackSizeMax (" SIZE_FORMAT ")",
464 MarkStackSize, MarkStackSizeMax);
465 return;
466 }
467 }
468 }
469 }
470
471 if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) {
472 vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
473 }
474
475 _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_num_tasks, mtGC);
476 _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_num_tasks, mtGC);
477
478 // so that the assertion in the task queue set's queue() accessor doesn't fail
479 _num_active_tasks = _max_num_tasks;
480
481 for (uint i = 0; i < _max_num_tasks; ++i) {
482 G1CMTaskQueue* task_queue = new G1CMTaskQueue();
483 task_queue->initialize();
484 _task_queues->register_queue(i, task_queue);
485
486 _tasks[i] = new G1CMTask(i, this, task_queue, _region_mark_stats, _g1h->max_regions());
487
488 _accum_task_vtime[i] = 0.0;
489 }
490
491 reset_at_marking_complete();
492 _completed_initialization = true;
493 }
494
495 void G1ConcurrentMark::reset() {
496 _has_aborted = false;
497
498 reset_marking_for_restart();
499
500 // Reset all tasks, since different phases will use a different number of active
501 // threads, so it's easiest to have all of them ready.
502 for (uint i = 0; i < _max_num_tasks; ++i) {
503 _tasks[i]->reset(_next_mark_bitmap);
504 }
505
506 uint max_regions = _g1h->max_regions();
507 for (uint i = 0; i < max_regions; i++) {
508 _top_at_rebuild_starts[i] = NULL;
509 _region_mark_stats[i].clear();
510 }
511 }
512
513 void G1ConcurrentMark::clear_statistics_in_region(uint region_idx) {
514 for (uint j = 0; j < _max_num_tasks; ++j) {
515 _tasks[j]->clear_mark_stats_cache(region_idx);
516 }
517 _top_at_rebuild_starts[region_idx] = NULL;
518 _region_mark_stats[region_idx].clear();
519 }
520
521 void G1ConcurrentMark::clear_statistics(HeapRegion* r) {
522 uint const region_idx = r->hrm_index();
523 if (r->is_humongous()) {
524 assert(r->is_starts_humongous(), "Got humongous continues region here");
525 uint const size_in_regions = (uint)_g1h->humongous_obj_size_in_regions(oop(r->humongous_start_region()->bottom())->size());
526 for (uint j = region_idx; j < (region_idx + size_in_regions); j++) {
527 clear_statistics_in_region(j);
528 }
529 } else {
530 clear_statistics_in_region(region_idx);
531 }
532 }
533
534 static void clear_mark_if_set(G1CMBitMap* bitmap, HeapWord* addr) {
535 if (bitmap->is_marked(addr)) {
536 bitmap->clear(addr);
537 }
538 }
539
540 void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
541 assert_at_safepoint_on_vm_thread();
542
543 // Need to clear all mark bits of the humongous object.
544 clear_mark_if_set(_prev_mark_bitmap, r->bottom());
545 clear_mark_if_set(_next_mark_bitmap, r->bottom());
546
547 if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
548 return;
549 }
550
551 // Clear any statistics about the region gathered so far.
552 clear_statistics(r);
553 }
554
555 void G1ConcurrentMark::reset_marking_for_restart() {
556 _global_mark_stack.set_empty();
557
558 // Expand the marking stack, if we have to and if we can.
559 if (has_overflown()) {
560 _global_mark_stack.expand();
561
562 uint max_regions = _g1h->max_regions();
563 for (uint i = 0; i < max_regions; i++) {
564 _region_mark_stats[i].clear_during_overflow();
565 }
566 }
567
568 clear_has_overflown();
569 _finger = _heap.start();
570
571 for (uint i = 0; i < _max_num_tasks; ++i) {
572 G1CMTaskQueue* queue = _task_queues->queue(i);
573 queue->set_empty();
574 }
575 }
576
577 void G1ConcurrentMark::set_concurrency(uint active_tasks) {
578 assert(active_tasks <= _max_num_tasks, "we should not have more");
579
580 _num_active_tasks = active_tasks;
581 // Need to update the three data structures below according to the
582 // number of active threads for this phase.
583 _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
584 _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
585 _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
586 }
587
588 void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
589 set_concurrency(active_tasks);
590
591 _concurrent = concurrent;
592
593 if (!concurrent) {
594 // At this point we should be in a STW phase, and completed marking.
595 assert_at_safepoint_on_vm_thread();
596 assert(out_of_regions(),
597 "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
598 p2i(_finger), p2i(_heap.end()));
599 }
600 }
601
602 void G1ConcurrentMark::reset_at_marking_complete() {
603 // We set the global marking state to some default values when we're
604 // not doing marking.
605 reset_marking_for_restart();
606 _num_active_tasks = 0;
607 }
608
609 G1ConcurrentMark::~G1ConcurrentMark() {
610 FREE_C_HEAP_ARRAY(HeapWord*, _top_at_rebuild_starts);
611 FREE_C_HEAP_ARRAY(G1RegionMarkStats, _region_mark_stats);
612 // The G1ConcurrentMark instance is never freed.
613 ShouldNotReachHere();
614 }
615
616 class G1ClearBitMapTask : public AbstractGangTask {
617 public:
618 static size_t chunk_size() { return M; }
619
620 private:
621 // Heap region closure used for clearing the given mark bitmap.
622 class G1ClearBitmapHRClosure : public HeapRegionClosure {
623 private:
624 G1CMBitMap* _bitmap;
625 G1ConcurrentMark* _cm;
626 public:
627 G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _bitmap(bitmap), _cm(cm) {
628 }
629
630 virtual bool do_heap_region(HeapRegion* r) {
631 size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;
632
633 HeapWord* cur = r->bottom();
634 HeapWord* const end = r->end();
635
636 while (cur < end) {
637 MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
638 _bitmap->clear_range(mr);
639
640 cur += chunk_size_in_words;
641
642 // Abort iteration if after yielding the marking has been aborted.
643 if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
644 return true;
645 }
646 // Repeat the asserts from before the start of the closure. We will do them
647 // as asserts here to minimize their overhead on the product. However, we
648 // will have them as guarantees at the beginning / end of the bitmap
649 // clearing to get some checking in the product.
650 assert(_cm == NULL || _cm->cm_thread()->during_cycle(), "invariant");
651 assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_or_rebuild_in_progress(), "invariant");
652 }
653 assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());
654
655 return false;
656 }
657 };
658
659 G1ClearBitmapHRClosure _cl;
660 HeapRegionClaimer _hr_claimer;
661 bool _suspendible; // If the task is suspendible, workers must join the STS.
662
663 public:
664 G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
665 AbstractGangTask("G1 Clear Bitmap"),
666 _cl(bitmap, suspendible ? cm : NULL),
667 _hr_claimer(n_workers),
668 _suspendible(suspendible)
669 { }
670
671 void work(uint worker_id) {
672 SuspendibleThreadSetJoiner sts_join(_suspendible);
673 G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&_cl, &_hr_claimer, worker_id);
674 }
675
676 bool is_complete() {
677 return _cl.is_complete();
678 }
679 };
680
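// Clears the given bitmap in units of G1ClearBitMapTask::chunk_size() (1M of
// bitmap per work unit). As a rough illustration, assuming a 32 GB committed
// heap and heap_map_factor() == 64 (one mark bit per heap word), about 512 MB
// of bitmap need clearing, i.e. 512 work units spread over the workers.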
681 void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
682 assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");
683
684 size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
685 size_t const num_chunks = align_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();
686
687 uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
688
689 G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);
690
691 log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
692 workers->run_task(&cl, num_workers);
693 guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
694 }
695
696 void G1ConcurrentMark::cleanup_for_next_mark() {
697 // Make sure that the concurrent mark thread still appears to be in
698 // the current cycle.
699 guarantee(cm_thread()->during_cycle(), "invariant");
700
701 // We are finishing up the current cycle by clearing the next
702 // marking bitmap and getting it ready for the next cycle. During
703 // this time no other cycle can start. So, let's make sure that this
704 // is the case.
705 guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
706
707 clear_bitmap(_next_mark_bitmap, _concurrent_workers, true);
708
709 // Repeat the asserts from above.
710 guarantee(cm_thread()->during_cycle(), "invariant");
711 guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
712 }
713
714 void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
715 assert_at_safepoint_on_vm_thread();
716 clear_bitmap(_prev_mark_bitmap, workers, false);
717 }
718
719 class NoteStartOfMarkHRClosure : public HeapRegionClosure {
720 public:
721 bool do_heap_region(HeapRegion* r) {
722 r->note_start_of_marking();
723 return false;
724 }
725 };
726
727 void G1ConcurrentMark::pre_initial_mark() {
728 // Initialize marking structures. This has to be done in a STW phase.
729 reset();
730
731 // For each region note start of marking.
732 NoteStartOfMarkHRClosure startcl;
733 _g1h->heap_region_iterate(&startcl);
734 }
735
736
737 void G1ConcurrentMark::post_initial_mark() {
738 // Start Concurrent Marking weak-reference discovery.
739 ReferenceProcessor* rp = _g1h->ref_processor_cm();
740 // enable ("weak") refs discovery
741 rp->enable_discovery();
742 rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
743
744 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
745 // This is the start of the marking cycle; we expect all
746 // threads to have SATB queues with active set to false.
747 satb_mq_set.set_active_all_threads(true, /* new active value */
748 false /* expected_active */);
749
750 _root_regions.prepare_for_scan();
751
752 // update_g1_committed() will be called at the end of an evac pause
753 // when marking is on. So, it's also called at the end of the
754 // initial-mark pause to update the heap end, if the heap expands
755 // during it. No need to call it here.
756 }
757
758 /*
759 * Notice that in the next two methods, we actually leave the STS
760 * during the barrier sync and join it immediately afterwards. If we
761 * do not do this, the following deadlock can occur: one thread could
762 * be in the barrier sync code, waiting for the other thread to also
763 * sync up, whereas another one could be trying to yield, while also
764 * waiting for the other threads to sync up too.
765 *
766 * Note, however, that this code is also used during remark and in
767 * this case we should not attempt to leave / enter the STS, otherwise
768 * we'll either hit an assert (debug / fastdebug) or deadlock
769 * (product). So we should only leave / enter the STS if we are
770 * operating concurrently.
771 *
772 * Because the thread that does the sync barrier has left the STS, it
773 * is possible for it to be suspended while a Full GC or an evacuation
774 * pause occurs. This is actually safe, since entering the sync
775 * barrier is one of the last things do_marking_step() does, and it
776 * doesn't manipulate any data structures afterwards.
777 */
778
779 void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
780 bool barrier_aborted;
781 {
782 SuspendibleThreadSetLeaver sts_leave(concurrent());
783 barrier_aborted = !_first_overflow_barrier_sync.enter();
784 }
785
786 // at this point everyone should have synced up and not be doing any
787 // more work
788
789 if (barrier_aborted) {
790 // If the barrier aborted we ignore the overflow condition and
791 // just abort the whole marking phase as quickly as possible.
792 return;
793 }
794 }
795
796 void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
797 SuspendibleThreadSetLeaver sts_leave(concurrent());
798 _second_overflow_barrier_sync.enter();
799
800 // at this point everything should be re-initialized and ready to go
801 }
802
803 class G1CMConcurrentMarkingTask : public AbstractGangTask {
804 G1ConcurrentMark* _cm;
805
806 public:
807 void work(uint worker_id) {
808 assert(Thread::current()->is_ConcurrentGC_thread(), "Not a concurrent GC thread");
809 ResourceMark rm;
810
811 double start_vtime = os::elapsedVTime();
812
813 {
814 SuspendibleThreadSetJoiner sts_join;
815
816 assert(worker_id < _cm->active_tasks(), "invariant");
817
818 G1CMTask* task = _cm->task(worker_id);
819 task->record_start_time();
820 if (!_cm->has_aborted()) {
821 do {
822 task->do_marking_step(G1ConcMarkStepDurationMillis,
823 true /* do_termination */,
824 false /* is_serial*/);
825
826 _cm->do_yield_check();
827 } while (!_cm->has_aborted() && task->has_aborted());
828 }
829 task->record_end_time();
830 guarantee(!task->has_aborted() || _cm->has_aborted(), "invariant");
831 }
832
833 double end_vtime = os::elapsedVTime();
834 _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
835 }
836
837 G1CMConcurrentMarkingTask(G1ConcurrentMark* cm) :
838 AbstractGangTask("Concurrent Mark"), _cm(cm) { }
839
840 ~G1CMConcurrentMarkingTask() { }
841 };
842
843 uint G1ConcurrentMark::calc_active_marking_workers() {
844 uint result = 0;
845 if (!UseDynamicNumberOfGCThreads ||
846 (!FLAG_IS_DEFAULT(ConcGCThreads) &&
847 !ForceDynamicNumberOfGCThreads)) {
848 result = _max_concurrent_workers;
849 } else {
850 result =
851 AdaptiveSizePolicy::calc_default_active_workers(_max_concurrent_workers,
852 1, /* Minimum workers */
853 _num_concurrent_workers,
854 Threads::number_of_non_daemon_threads());
855 // Don't scale the result down by scale_concurrent_worker_threads() because
856 // that scaling has already gone into "_max_concurrent_workers".
857 }
858 assert(result > 0 && result <= _max_concurrent_workers,
859 "Calculated number of marking workers must be larger than zero and at most the maximum %u, but is %u",
860 _max_concurrent_workers, result);
861 return result;
862 }
863
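// Applies G1RootRegionScanClosure to every object in [bottom, top) of the
// given root region, prefetching PrefetchScanIntervalInBytes ahead of the
// current scan position.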
864 void G1ConcurrentMark::scan_root_region(HeapRegion* hr, uint worker_id) {
865 // Currently, only survivors can be root regions.
866 assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
867 G1RootRegionScanClosure cl(_g1h, this, worker_id);
868
869 const uintx interval = PrefetchScanIntervalInBytes;
870 HeapWord* curr = hr->bottom();
871 const HeapWord* end = hr->top();
872 while (curr < end) {
873 Prefetch::read(curr, interval);
874 oop obj = oop(curr);
875 int size = obj->oop_iterate_size(&cl);
876 assert(size == obj->size(), "sanity");
877 curr += size;
878 }
879 }
880
881 class G1CMRootRegionScanTask : public AbstractGangTask {
882 G1ConcurrentMark* _cm;
883 public:
884 G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
885 AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }
886
887 void work(uint worker_id) {
888 assert(Thread::current()->is_ConcurrentGC_thread(),
889 "this should only be done by a conc GC thread");
890
891 G1CMRootRegions* root_regions = _cm->root_regions();
892 HeapRegion* hr = root_regions->claim_next();
893 while (hr != NULL) {
894 _cm->scan_root_region(hr, worker_id);
895 hr = root_regions->claim_next();
896 }
897 }
898 };
899
900 void G1ConcurrentMark::scan_root_regions() {
901 // scan_in_progress() will have been set to true only if there was
902 // at least one root region to scan. So, if it's false, we
903 // should not attempt to do any further work.
904 if (root_regions()->scan_in_progress()) {
905 assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");
906
907 _num_concurrent_workers = MIN2(calc_active_marking_workers(),
908 // We distribute work on a per-region basis, so starting
909 // more threads than that is useless.
910 root_regions()->num_root_regions());
911 assert(_num_concurrent_workers <= _max_concurrent_workers,
912 "Maximum number of marking threads exceeded");
913
914 G1CMRootRegionScanTask task(this);
915 log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
916 task.name(), _num_concurrent_workers, root_regions()->num_root_regions());
917 _concurrent_workers->run_task(&task, _num_concurrent_workers);
918
919 // It's possible that has_aborted() is true here without actually
920 // aborting the survivor scan earlier. This is OK as it's
921 // mainly used for sanity checking.
922 root_regions()->scan_finished();
923 }
924 }
925
926 void G1ConcurrentMark::concurrent_cycle_start() {
927 _gc_timer_cm->register_gc_start();
928
929 _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());
930
931 _g1h->trace_heap_before_gc(_gc_tracer_cm);
932 }
933
934 void G1ConcurrentMark::concurrent_cycle_end() {
935 _g1h->collector_state()->set_clearing_next_bitmap(false);
936
937 _g1h->trace_heap_after_gc(_gc_tracer_cm);
938
939 if (has_aborted()) {
940 log_info(gc, marking)("Concurrent Mark Abort");
941 _gc_tracer_cm->report_concurrent_mode_failure();
942 }
943
944 _gc_timer_cm->register_gc_end();
945
946 _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
947 }
948
949 void G1ConcurrentMark::mark_from_roots() {
950 _restart_for_overflow = false;
951
952 _num_concurrent_workers = calc_active_marking_workers();
953
954 uint active_workers = MAX2(1U, _num_concurrent_workers);
955
956 // Setting active workers is not guaranteed since fewer
957 // worker threads may currently exist and more may not be
958 // available.
959 active_workers = _concurrent_workers->update_active_workers(active_workers);
960 log_info(gc, task)("Using %u workers of %u for marking", active_workers, _concurrent_workers->total_workers());
961
962 // Parallel task terminator is set in "set_concurrency_and_phase()"
963 set_concurrency_and_phase(active_workers, true /* concurrent */);
964
965 G1CMConcurrentMarkingTask marking_task(this);
966 _concurrent_workers->run_task(&marking_task);
967 print_stats();
968 }
969
970 void G1ConcurrentMark::verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller) {
971 G1HeapVerifier* verifier = _g1h->verifier();
972
973 verifier->verify_region_sets_optional();
974
975 if (VerifyDuringGC) {
976 GCTraceTime(Debug, gc, phases) debug(caller, _gc_timer_cm);
977
978 size_t const BufLen = 512;
979 char buffer[BufLen];
980
981 jio_snprintf(buffer, BufLen, "During GC (%s)", caller);
982 verifier->verify(type, vo, buffer);
983 }
984
985 verifier->check_bitmaps(caller);
986 }
987
988 class G1UpdateRemSetTrackingBeforeRebuildTask : public AbstractGangTask {
989 G1CollectedHeap* _g1h;
990 G1ConcurrentMark* _cm;
991 HeapRegionClaimer _hrclaimer;
992 uint volatile _total_selected_for_rebuild;
993
994 G1PrintRegionLivenessInfoClosure _cl;
995
996 class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
997 G1CollectedHeap* _g1h;
998 G1ConcurrentMark* _cm;
999
1000 G1PrintRegionLivenessInfoClosure* _cl;
1001
1002 uint _num_regions_selected_for_rebuild; // The number of regions actually selected for rebuild.
1003
1004 void update_remset_before_rebuild(HeapRegion* hr) {
1005 G1RemSetTrackingPolicy* tracking_policy = _g1h->g1_policy()->remset_tracker();
1006
1007 bool selected_for_rebuild;
1008 if (hr->is_humongous()) {
1009 bool const is_live = _cm->liveness(hr->humongous_start_region()->hrm_index()) > 0;
1010 selected_for_rebuild = tracking_policy->update_humongous_before_rebuild(hr, is_live);
1011 } else {
1012 size_t const live_bytes = _cm->liveness(hr->hrm_index());
1013 selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
1014 }
1015 if (selected_for_rebuild) {
1016 _num_regions_selected_for_rebuild++;
1017 }
1018 _cm->update_top_at_rebuild_start(hr);
1019 }
1020
1021 // Distribute the given words across the humongous object starting with hr and
1022 // note end of marking.
1023 void distribute_marked_bytes(HeapRegion* hr, size_t marked_words) {
1024 uint const region_idx = hr->hrm_index();
1025 size_t const obj_size_in_words = (size_t)oop(hr->bottom())->size();
1026 uint const num_regions_in_humongous = (uint)G1CollectedHeap::humongous_obj_size_in_regions(obj_size_in_words);
1027
1028 // "Distributing" zero words means that we only note end of marking for these
1029 // regions.
1030 assert(marked_words == 0 || obj_size_in_words == marked_words,
1031 "Marked words should either be 0 or the same as humongous object (" SIZE_FORMAT ") but is " SIZE_FORMAT,
1032 obj_size_in_words, marked_words);
1033
1034 for (uint i = region_idx; i < (region_idx + num_regions_in_humongous); i++) {
1035 HeapRegion* const r = _g1h->region_at(i);
1036 size_t const words_to_add = MIN2(HeapRegion::GrainWords, marked_words);
1037
1038 log_trace(gc, marking)("Adding " SIZE_FORMAT " words to humongous region %u (%s)",
1039 words_to_add, i, r->get_type_str());
1040 add_marked_bytes_and_note_end(r, words_to_add * HeapWordSize);
1041 marked_words -= words_to_add;
1042 }
1043 assert(marked_words == 0,
1044 SIZE_FORMAT " words left after distributing space across %u regions",
1045 marked_words, num_regions_in_humongous);
1046 }
1047
1048 void update_marked_bytes(HeapRegion* hr) {
1049 uint const region_idx = hr->hrm_index();
1050 size_t const marked_words = _cm->liveness(region_idx);
1051 // The marking attributes the object's size completely to the humongous starts
1052 // region. We need to distribute this value across the entire set of regions a
1053 // humongous object spans.
1054 if (hr->is_humongous()) {
1055 assert(hr->is_starts_humongous() || marked_words == 0,
1056 "Should not have marked words " SIZE_FORMAT " in non-starts humongous region %u (%s)",
1057 marked_words, region_idx, hr->get_type_str());
1058 if (hr->is_starts_humongous()) {
1059 distribute_marked_bytes(hr, marked_words);
1060 }
1061 } else {
1062 log_trace(gc, marking)("Adding " SIZE_FORMAT " words to region %u (%s)", marked_words, region_idx, hr->get_type_str());
1063 add_marked_bytes_and_note_end(hr, marked_words * HeapWordSize);
1064 }
1065 }
1066
1067 void add_marked_bytes_and_note_end(HeapRegion* hr, size_t marked_bytes) {
1068 hr->add_to_marked_bytes(marked_bytes);
1069 _cl->do_heap_region(hr);
1070 hr->note_end_of_marking();
1071 }
1072
1073 public:
1074 G1UpdateRemSetTrackingBeforeRebuild(G1CollectedHeap* g1h, G1ConcurrentMark* cm, G1PrintRegionLivenessInfoClosure* cl) :
1075 _g1h(g1h), _cm(cm), _cl(cl), _num_regions_selected_for_rebuild(0) { }
1076
1077 virtual bool do_heap_region(HeapRegion* r) {
1078 update_remset_before_rebuild(r);
1079 update_marked_bytes(r);
1080
1081 return false;
1082 }
1083
1084 uint num_selected_for_rebuild() const { return _num_regions_selected_for_rebuild; }
1085 };
1086
1087 public:
1088 G1UpdateRemSetTrackingBeforeRebuildTask(G1CollectedHeap* g1h, G1ConcurrentMark* cm, uint num_workers) :
1089 AbstractGangTask("G1 Update RemSet Tracking Before Rebuild"),
1090 _g1h(g1h), _cm(cm), _hrclaimer(num_workers), _total_selected_for_rebuild(0), _cl("Post-Marking") { }
1091
1092 virtual void work(uint worker_id) {
1093 G1UpdateRemSetTrackingBeforeRebuild update_cl(_g1h, _cm, &_cl);
1094 _g1h->heap_region_par_iterate_from_worker_offset(&update_cl, &_hrclaimer, worker_id);
1095 Atomic::add(update_cl.num_selected_for_rebuild(), &_total_selected_for_rebuild);
1096 }
1097
1098 uint total_selected_for_rebuild() const { return _total_selected_for_rebuild; }
1099
1100 // Number of regions for which roughly one thread should be spawned for this work.
1101 static const uint RegionsPerThread = 384;
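// For example, a 2048 region heap requests (2048 + 383) / 384 = 6 workers,
// further limited by the number of currently active workers (see remark()).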
1102 };
1103
1104 class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure {
1105 G1CollectedHeap* _g1h;
1106 public:
1107 G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { }
1108
1109 virtual bool do_heap_region(HeapRegion* r) {
1110 _g1h->g1_policy()->remset_tracker()->update_after_rebuild(r);
1111 return false;
1112 }
1113 };
1114
1115 void G1ConcurrentMark::remark() {
1116 assert_at_safepoint_on_vm_thread();
1117
1118 // If a full collection has happened, we should not continue. However we might
1119 // have ended up here as the Remark VM operation has been scheduled already.
1120 if (has_aborted()) {
1121 return;
1122 }
1123
1124 G1Policy* g1p = _g1h->g1_policy();
1125 g1p->record_concurrent_mark_remark_start();
1126
1127 double start = os::elapsedTime();
1128
1129 verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark before");
1130
1131 {
1132 GCTraceTime(Debug, gc, phases) debug("Finalize Marking", _gc_timer_cm);
1133 finalize_marking();
1134 }
1135
1136 double mark_work_end = os::elapsedTime();
1137
1138 bool const mark_finished = !has_overflown();
1139 if (mark_finished) {
1140 weak_refs_work(false /* clear_all_soft_refs */);
1141
1142 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
1143 // We're done with marking.
1144 // This is the end of the marking cycle; we expect all
1145 // threads to have SATB queues with active set to true.
1146 satb_mq_set.set_active_all_threads(false, /* new active value */
1147 true /* expected_active */);
1148
1149 {
1150 GCTraceTime(Debug, gc, phases) debug("Flush Task Caches", _gc_timer_cm);
1151 flush_all_task_caches();
1152 }
1153
1154 // Install newly created mark bitmap as "prev".
1155 swap_mark_bitmaps();
1156 {
1157 GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking Before Rebuild", _gc_timer_cm);
1158
1159 uint const workers_by_capacity = (_g1h->num_regions() + G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread - 1) /
1160 G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread;
1161 uint const num_workers = MIN2(_g1h->workers()->active_workers(), workers_by_capacity);
1162
1163 G1UpdateRemSetTrackingBeforeRebuildTask cl(_g1h, this, num_workers);
1164 log_debug(gc, ergo)("Running %s using %u workers for %u regions in heap", cl.name(), num_workers, _g1h->num_regions());
1165 _g1h->workers()->run_task(&cl, num_workers);
1166
1167 log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
1168 _g1h->num_regions(), cl.total_selected_for_rebuild());
1169 }
1170 {
1171 GCTraceTime(Debug, gc, phases) debug("Reclaim Empty Regions", _gc_timer_cm);
1172 reclaim_empty_regions();
1173 }
1174
1175 // Clean out dead classes
1176 if (ClassUnloadingWithConcurrentMark) {
1177 GCTraceTime(Debug, gc, phases) debug("Purge Metaspace", _gc_timer_cm);
1178 ClassLoaderDataGraph::purge();
1179 }
1180
1181 compute_new_sizes();
1182
1183 verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark after");
1184
1185 assert(!restart_for_overflow(), "sanity");
1186 // Completely reset the marking state since marking completed
1187 reset_at_marking_complete();
1188 } else {
1189 // We overflowed. Restart concurrent marking.
1190 _restart_for_overflow = true;
1191
1192 verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");
1193
1194 // Clear the marking state because we will be restarting
1195 // marking due to overflowing the global mark stack.
1196 reset_marking_for_restart();
1197 }
1198
1199 {
1200 GCTraceTime(Debug, gc, phases) debug("Report Object Count", _gc_timer_cm);
1201 report_object_count(mark_finished);
1202 }
1203
1204 // Statistics
1205 double now = os::elapsedTime();
1206 _remark_mark_times.add((mark_work_end - start) * 1000.0);
1207 _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1208 _remark_times.add((now - start) * 1000.0);
1209
1210 g1p->record_concurrent_mark_remark_end();
1211 }
1212
1213 class G1ReclaimEmptyRegionsTask : public AbstractGangTask {
1214 // Per-region work during the Cleanup pause.
1215 class G1ReclaimEmptyRegionsClosure : public HeapRegionClosure {
1216 G1CollectedHeap* _g1h;
1217 size_t _freed_bytes;
1218 FreeRegionList* _local_cleanup_list;
1219 uint _old_regions_removed;
1220 uint _humongous_regions_removed;
1221 HRRSCleanupTask* _hrrs_cleanup_task;
1222
1223 public:
1224 G1ReclaimEmptyRegionsClosure(G1CollectedHeap* g1h,
1225 FreeRegionList* local_cleanup_list,
1226 HRRSCleanupTask* hrrs_cleanup_task) :
1227 _g1h(g1h),
1228 _freed_bytes(0),
1229 _local_cleanup_list(local_cleanup_list),
1230 _old_regions_removed(0),
1231 _humongous_regions_removed(0),
1232 _hrrs_cleanup_task(hrrs_cleanup_task) { }
1233
1234 size_t freed_bytes() { return _freed_bytes; }
1235 const uint old_regions_removed() { return _old_regions_removed; }
1236 const uint humongous_regions_removed() { return _humongous_regions_removed; }
1237
1238 bool do_heap_region(HeapRegion *hr) {
1239 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
1240 _freed_bytes += hr->used();
1241 hr->set_containing_set(NULL);
1242 if (hr->is_humongous()) {
1243 _humongous_regions_removed++;
1244 _g1h->free_humongous_region(hr, _local_cleanup_list);
1245 } else {
1246 _old_regions_removed++;
1247 _g1h->free_region(hr, _local_cleanup_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
1248 }
1249 hr->clear_cardtable();
1250 _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
1251 log_trace(gc)("Reclaimed empty region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
1252 } else {
1253 hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
1254 }
1255
1256 return false;
1257 }
1258 };
1259
1260 G1CollectedHeap* _g1h;
1261 FreeRegionList* _cleanup_list;
1262 HeapRegionClaimer _hrclaimer;
1263
1264 public:
1265 G1ReclaimEmptyRegionsTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
1266 AbstractGangTask("G1 Cleanup"),
1267 _g1h(g1h),
1268 _cleanup_list(cleanup_list),
1269 _hrclaimer(n_workers) {
1270
1271 HeapRegionRemSet::reset_for_cleanup_tasks();
1272 }
1273
1274 void work(uint worker_id) {
1275 FreeRegionList local_cleanup_list("Local Cleanup List");
1276 HRRSCleanupTask hrrs_cleanup_task;
1277 G1ReclaimEmptyRegionsClosure cl(_g1h,
1278 &local_cleanup_list,
1279 &hrrs_cleanup_task);
1280 _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
1281 assert(cl.is_complete(), "Shouldn't have aborted!");
1282
1283 // Now update the old/humongous region sets
1284 _g1h->remove_from_old_sets(cl.old_regions_removed(), cl.humongous_regions_removed());
1285 {
1286 MutexLocker x(ParGCRareEvent_lock);
1287 _g1h->decrement_summary_bytes(cl.freed_bytes());
1288
1289 _cleanup_list->add_ordered(&local_cleanup_list);
1290 assert(local_cleanup_list.is_empty(), "post-condition");
1291
1292 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
1293 }
1294 }
1295 };
1296
1297 void G1ConcurrentMark::reclaim_empty_regions() {
1298 WorkGang* workers = _g1h->workers();
1299 FreeRegionList empty_regions_list("Empty Regions After Mark List");
1300
1301 G1ReclaimEmptyRegionsTask cl(_g1h, &empty_regions_list, workers->active_workers());
1302 workers->run_task(&cl);
1303
1304 if (!empty_regions_list.is_empty()) {
1305 log_debug(gc)("Reclaimed %u empty regions", empty_regions_list.length());
1306 // Now print the empty regions list.
1307 G1HRPrinter* hrp = _g1h->hr_printer();
1308 if (hrp->is_active()) {
1309 FreeRegionListIterator iter(&empty_regions_list);
1310 while (iter.more_available()) {
1311 HeapRegion* hr = iter.get_next();
1312 hrp->cleanup(hr);
1313 }
1314 }
1315 // And actually make them available.
1316 _g1h->prepend_to_freelist(&empty_regions_list);
1317 }
1318 }
1319
1320 void G1ConcurrentMark::compute_new_sizes() {
1321 MetaspaceGC::compute_new_size();
1322
1323 // Cleanup will have freed any regions completely full of garbage.
1324 // Update the soft reference policy with the new heap occupancy.
1325 Universe::update_heap_info_at_gc();
1326
1327 // We reclaimed old regions so we should calculate the sizes to make
1328 // sure we update the old gen/space data.
1329 _g1h->g1mm()->update_sizes();
1330 }
1331
1332 void G1ConcurrentMark::cleanup() {
1333 assert_at_safepoint_on_vm_thread();
1334
1335 // If a full collection has happened, we shouldn't do this.
1336 if (has_aborted()) {
1337 return;
1338 }
1339
1340 G1Policy* g1p = _g1h->g1_policy();
1341 g1p->record_concurrent_mark_cleanup_start();
1342
1343 double start = os::elapsedTime();
1344
1345 verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup before");
1346
1347 {
1348 GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking After Rebuild", _gc_timer_cm);
1349 G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
1350 _g1h->heap_region_iterate(&cl);
1351 }
1352
1353 if (log_is_enabled(Trace, gc, liveness)) {
1354 G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
1355 _g1h->heap_region_iterate(&cl);
1356 }
1357
1358 verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup after");
1359
1360 // We need to make this be a "collection" so any collection pause that
1361 // races with it goes around and waits for Cleanup to finish.
1362 _g1h->increment_total_collections();
1363
1364 // Local statistics
1365 double recent_cleanup_time = (os::elapsedTime() - start);
1366 _total_cleanup_time += recent_cleanup_time;
1367 _cleanup_times.add(recent_cleanup_time);
1368
1369 {
1370 GCTraceTime(Debug, gc, phases) debug("Finalize Concurrent Mark Cleanup", _gc_timer_cm);
1371 _g1h->g1_policy()->record_concurrent_mark_cleanup_end();
1372 }
1373 }
1374
1375 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
1376 // Uses the G1CMTask associated with a worker thread (for serial reference
1377 // processing the G1CMTask for worker 0 is used) to preserve (mark) and
1378 // trace referent objects.
1379 //
1380 // Using the G1CMTask and embedded local queues avoids having the worker
1381 // threads operating on the global mark stack. This reduces the risk
1382 // of overflowing the stack - which we would rather avoid at this late
1383 // stage. Also using the tasks' local queues removes the potential
1384 // of the workers interfering with each other that could occur if
1385 // operating on the global stack.
1386
1387 class G1CMKeepAliveAndDrainClosure : public OopClosure {
1388 G1ConcurrentMark* _cm;
1389 G1CMTask* _task;
1390 uint _ref_counter_limit;
1391 uint _ref_counter;
1392 bool _is_serial;
1393 public:
1394 G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
1395 _cm(cm), _task(task), _ref_counter_limit(G1RefProcDrainInterval),
1396 _ref_counter(_ref_counter_limit), _is_serial(is_serial) {
1397 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
1398 }
1399
1400 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
1401 virtual void do_oop( oop* p) { do_oop_work(p); }
1402
1403 template <class T> void do_oop_work(T* p) {
1404 if (_cm->has_overflown()) {
1405 return;
1406 }
1407 if (!_task->deal_with_reference(p)) {
1408 // We did not add anything to the mark bitmap (or mark stack), so there is
1409 // no point trying to drain it.
1410 return;
1411 }
1412 _ref_counter--;
1413
1414 if (_ref_counter == 0) {
1415 // We have dealt with _ref_counter_limit references, pushing them
1416 // and objects reachable from them on to the local stack (and
1417 // possibly the global stack). Call G1CMTask::do_marking_step() to
1418 // process these entries.
1419 //
1420 // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
1421 // there's nothing more to do (i.e. we're done with the entries that
1422 // were pushed as a result of the G1CMTask::deal_with_reference() calls
1423 // above) or we overflow.
1424 //
1425 // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
1426 // flag while there may still be some work to do. (See the comment at
1427 // the beginning of G1CMTask::do_marking_step() for those conditions -
1428 // one of which is reaching the specified time target.) It is only
1429 // when G1CMTask::do_marking_step() returns without setting the
1430 // has_aborted() flag that the marking step has completed.
1431 do {
1432 double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
1433 _task->do_marking_step(mark_step_duration_ms,
1434 false /* do_termination */,
1435 _is_serial);
1436 } while (_task->has_aborted() && !_cm->has_overflown());
1437 _ref_counter = _ref_counter_limit;
1438 }
1439 }
1440 };
1441
1442 // 'Drain' oop closure used by both serial and parallel reference processing.
1443 // Uses the G1CMTask associated with a given worker thread (for serial
1444 // reference processing the G1CMtask for worker 0 is used). Calls the
1445 // do_marking_step routine, with an unbelievably large timeout value,
1446 // to drain the marking data structures of the remaining entries
1447 // added by the 'keep alive' oop closure above.
1448
1449 class G1CMDrainMarkingStackClosure : public VoidClosure {
1450 G1ConcurrentMark* _cm;
1451 G1CMTask* _task;
1452 bool _is_serial;
1453 public:
1454 G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
1455 _cm(cm), _task(task), _is_serial(is_serial) {
1456 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
1457 }
1458
1459 void do_void() {
1460 do {
1461 // We call G1CMTask::do_marking_step() to completely drain the local
1462 // and global marking stacks of entries pushed by the 'keep alive'
1463 // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
1464 //
1465 // G1CMTask::do_marking_step() is called in a loop, which we'll exit
1466 // if there's nothing more to do (i.e. we've completely drained the
1467 // entries that were pushed as a result of applying the 'keep alive'
1468 // closure to the entries on the discovered ref lists) or we overflow
1469 // the global marking stack.
1470 //
1471 // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
1472 // flag while there may still be some work to do. (See the comment at
1473 // the beginning of G1CMTask::do_marking_step() for those conditions -
1474 // one of which is reaching the specified time target.) It is only
1475 // when G1CMTask::do_marking_step() returns without setting the
1476 // has_aborted() flag that the marking step has completed.
1477
1478 _task->do_marking_step(1000000000.0 /* something very large */,
1479 true /* do_termination */,
1480 _is_serial);
1481 } while (_task->has_aborted() && !_cm->has_overflown());
1482 }
1483 };
1484
1485 // Implementation of AbstractRefProcTaskExecutor for parallel
1486 // reference processing at the end of G1 concurrent marking
1487
1488 class G1CMRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
1489 private:
1490 G1CollectedHeap* _g1h;
1491 G1ConcurrentMark* _cm;
1492 WorkGang* _workers;
1493 uint _active_workers;
1494
1495 public:
1496 G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
1497 G1ConcurrentMark* cm,
1498 WorkGang* workers,
1499 uint n_workers) :
1500 _g1h(g1h), _cm(cm),
1501 _workers(workers), _active_workers(n_workers) { }
1502
1503 virtual void execute(ProcessTask& task, uint ergo_workers);
1504 };
1505
1506 class G1CMRefProcTaskProxy : public AbstractGangTask {
1507 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
1508 ProcessTask& _proc_task;
1509 G1CollectedHeap* _g1h;
1510 G1ConcurrentMark* _cm;
1511
1512 public:
1513 G1CMRefProcTaskProxy(ProcessTask& proc_task,
1514 G1CollectedHeap* g1h,
1515 G1ConcurrentMark* cm) :
1516 AbstractGangTask("Process reference objects in parallel"),
1517 _proc_task(proc_task), _g1h(g1h), _cm(cm) {
1518 ReferenceProcessor* rp = _g1h->ref_processor_cm();
1519 assert(rp->processing_is_mt(), "shouldn't be here otherwise");
1520 }
1521
1522 virtual void work(uint worker_id) {
1523 ResourceMark rm;
1524 HandleMark hm;
1525 G1CMTask* task = _cm->task(worker_id);
1526 G1CMIsAliveClosure g1_is_alive(_g1h);
1527 G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
1528 G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
1529
1530 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
1531 }
1532 };
1533
1534 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task, uint ergo_workers) {
1535 assert(_workers != NULL, "Need parallel worker threads.");
1536 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
1537 assert(_workers->active_workers() >= ergo_workers,
1538 "Ergonomically chosen workers(%u) should be less than or equal to active workers(%u)",
1539 ergo_workers, _workers->active_workers());
1540
1541 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
1542
1543 // We need to reset the concurrency level before each
1544 // proxy task execution, so that the termination protocol
1545   // and overflow handling in G1CMTask::do_marking_step() know
1546 // how many workers to wait for.
1547 _cm->set_concurrency(ergo_workers);
1548 _workers->run_task(&proc_task_proxy, ergo_workers);
1549 }
1550
1551 void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) {
1552 ResourceMark rm;
1553 HandleMark hm;
1554
1555 // Is alive closure.
1556 G1CMIsAliveClosure g1_is_alive(_g1h);
1557
1558 // Inner scope to exclude the cleaning of the string table
1559 // from the displayed time.
1560 {
1561 GCTraceTime(Debug, gc, phases) debug("Reference Processing", _gc_timer_cm);
1562
1563 ReferenceProcessor* rp = _g1h->ref_processor_cm();
1564
1565 // See the comment in G1CollectedHeap::ref_processing_init()
1566 // about how reference processing currently works in G1.
1567
1568 // Set the soft reference policy
1569 rp->setup_policy(clear_all_soft_refs);
1570 assert(_global_mark_stack.is_empty(), "mark stack should be empty");
1571
1572 // Instances of the 'Keep Alive' and 'Complete GC' closures used
1573 // in serial reference processing. Note these closures are also
1574     // used for serially processing (by the current thread) the
1575 // JNI references during parallel reference processing.
1576 //
1577 // These closures do not need to synchronize with the worker
1578 // threads involved in parallel reference processing as these
1579     // instances are executed serially by the current thread (i.e.
1580 // reference processing is not multi-threaded and is thus
1581 // performed by the current thread instead of a gang worker).
1582 //
1583 // The gang tasks involved in parallel reference processing create
1584 // their own instances of these closures, which do their own
1585 // synchronization among themselves.
1586 G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
1587 G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
1588
1589 // We need at least one active thread. If reference processing
1590 // is not multi-threaded we use the current (VMThread) thread,
1591 // otherwise we use the work gang from the G1CollectedHeap and
1592 // we utilize all the worker threads we can.
1593 bool processing_is_mt = rp->processing_is_mt();
1594 uint active_workers = (processing_is_mt ? _g1h->workers()->active_workers() : 1U);
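    // Clamp the number of workers into the range [1, _max_num_tasks].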
1595 active_workers = MAX2(MIN2(active_workers, _max_num_tasks), 1U);
1596
1597 // Parallel processing task executor.
1598 G1CMRefProcTaskExecutor par_task_executor(_g1h, this,
1599 _g1h->workers(), active_workers);
1600 AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
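    // When processing is not MT we pass a NULL executor, which makes
    // process_discovered_references() below apply the serial closures
    // directly on the current thread.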
1601
1602 // Set the concurrency level. The phase was already set prior to
1603 // executing the remark task.
1604 set_concurrency(active_workers);
1605
1606 // Set the degree of MT processing here. If the discovery was done MT,
1607 // the number of threads involved during discovery could differ from
1608 // the number of active workers. This is OK as long as the discovered
1609 // Reference lists are balanced (see balance_all_queues() and balance_queues()).
1610 rp->set_active_mt_degree(active_workers);
1611
1612 ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->max_num_queues());
1613
1614 // Process the weak references.
1615 const ReferenceProcessorStats& stats =
1616 rp->process_discovered_references(&g1_is_alive,
1617 &g1_keep_alive,
1618 &g1_drain_mark_stack,
1619 executor,
1620 &pt);
1621 _gc_tracer_cm->report_gc_reference_stats(stats);
1622 pt.print_all_references();
1623
1624 // The do_oop work routines of the keep_alive and drain_marking_stack
1625 // oop closures will set the has_overflown flag if we overflow the
1626 // global marking stack.
1627
1628 assert(has_overflown() || _global_mark_stack.is_empty(),
1629 "Mark stack should be empty (unless it has overflown)");
1630
1631     assert(rp->num_queues() == active_workers, "Reference processor queue count should match the number of active workers");
1632
1633 rp->verify_no_references_recorded();
1634 assert(!rp->discovery_enabled(), "Post condition");
1635 }
1636
1637 if (has_overflown()) {
1638     // We cannot trust g1_is_alive and the contents of the heap if the marking stack
1639 // overflowed while processing references. Exit the VM.
1640 fatal("Overflow during reference processing, can not continue. Please "
1641 "increase MarkStackSizeMax (current value: " SIZE_FORMAT ") and "
1642 "restart.", MarkStackSizeMax);
1643 return;
1644 }
1645
1646 assert(_global_mark_stack.is_empty(), "Marking should have completed");
1647
1648 {
1649 GCTraceTime(Debug, gc, phases) debug("Weak Processing", _gc_timer_cm);
1650 WeakProcessor::weak_oops_do(_g1h->workers(), &g1_is_alive, &do_nothing_cl, 1);
1651 }
1652
1653 // Unload Klasses, String, Code Cache, etc.
1654 if (ClassUnloadingWithConcurrentMark) {
1655 GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
1656 bool purged_classes = SystemDictionary::do_unloading(_gc_timer_cm, false /* Defer cleaning */);
1657 _g1h->complete_cleaning(&g1_is_alive, purged_classes);
1658 } else {
1659 GCTraceTime(Debug, gc, phases) debug("Cleanup", _gc_timer_cm);
1660     // No need to clean the string table as it is treated as strong roots when
1661 // class unloading is disabled.
1662 _g1h->partial_cleaning(&g1_is_alive, false, G1StringDedup::is_enabled());
1663 }
1664 }
1665
1666 class G1PrecleanYieldClosure : public YieldClosure {
1667 G1ConcurrentMark* _cm;
1668
1669 public:
1670 G1PrecleanYieldClosure(G1ConcurrentMark* cm) : _cm(cm) { }
1671
1672 virtual bool should_return() {
1673 return _cm->has_aborted();
1674 }
1675
1676 virtual bool should_return_fine_grain() {
1677 _cm->do_yield_check();
1678 return _cm->has_aborted();
1679 }
1680 };
1681
1682 void G1ConcurrentMark::preclean() {
1683 assert(G1UseReferencePrecleaning, "Precleaning must be enabled.");
1684
1685 SuspendibleThreadSetJoiner joiner;
1686
1687 G1CMKeepAliveAndDrainClosure keep_alive(this, task(0), true /* is_serial */);
1688 G1CMDrainMarkingStackClosure drain_mark_stack(this, task(0), true /* is_serial */);
1689
1690 set_concurrency_and_phase(1, true);
1691
1692 G1PrecleanYieldClosure yield_cl(this);
1693
1694 ReferenceProcessor* rp = _g1h->ref_processor_cm();
1695   // Precleaning is single-threaded. Temporarily disable MT discovery.
1696 ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
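  // Note that ReferenceProcessorMTDiscoveryMutator is an RAII helper: the
  // previous MT-discovery setting is restored when it goes out of scope at
  // the end of this method.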
1697 rp->preclean_discovered_references(rp->is_alive_non_header(),
1698 &keep_alive,
1699 &drain_mark_stack,
1700 &yield_cl,
1701 _gc_timer_cm);
1702 }
1703
1704 // When sampling object counts, we already swapped the mark bitmaps, so we need to use
1705 // the prev bitmap to determine liveness.
1706 class G1ObjectCountIsAliveClosure: public BoolObjectClosure {
1707 G1CollectedHeap* _g1h;
1708 public:
1709 G1ObjectCountIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) { }
1710
1711 bool do_object_b(oop obj) {
1712 HeapWord* addr = (HeapWord*)obj;
1713 return addr != NULL &&
1714 (!_g1h->is_in_g1_reserved(addr) || !_g1h->is_obj_dead(obj));
1715 }
1716 };
1717
1718 void G1ConcurrentMark::report_object_count(bool mark_completed) {
1719   // Depending on whether marking has completed, liveness needs to be determined
1720   // using either the next or the prev bitmap.
1721 if (mark_completed) {
1722 G1ObjectCountIsAliveClosure is_alive(_g1h);
1723 _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1724 } else {
1725 G1CMIsAliveClosure is_alive(_g1h);
1726 _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1727 }
1728 }
1729
1730
1731 void G1ConcurrentMark::swap_mark_bitmaps() {
1732 G1CMBitMap* temp = _prev_mark_bitmap;
1733 _prev_mark_bitmap = _next_mark_bitmap;
1734 _next_mark_bitmap = temp;
1735 _g1h->collector_state()->set_clearing_next_bitmap(true);
1736 }
1737
1738 // Closure for marking entries in SATB buffers.
1739 class G1CMSATBBufferClosure : public SATBBufferClosure {
1740 private:
1741 G1CMTask* _task;
1742 G1CollectedHeap* _g1h;
1743
1744 // This is very similar to G1CMTask::deal_with_reference, but with
1745   // more relaxed requirements for the argument, so this closure must
1746   // be more circumspect about treating the argument as an object.
1747 void do_entry(void* entry) const {
1748 _task->increment_refs_reached();
1749 oop const obj = static_cast<oop>(entry);
1750 _task->make_reference_grey(obj);
1751 }
1752
1753 public:
1754 G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h)
1755 : _task(task), _g1h(g1h) { }
1756
1757 virtual void do_buffer(void** buffer, size_t size) {
1758 for (size_t i = 0; i < size; ++i) {
1759 do_entry(buffer[i]);
1760 }
1761 }
1762 };
1763
1764 class G1RemarkThreadsClosure : public ThreadClosure {
1765 G1CMSATBBufferClosure _cm_satb_cl;
1766 G1CMOopClosure _cm_cl;
1767 MarkingCodeBlobClosure _code_cl;
1768 int _thread_parity;
1769
1770 public:
1771 G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
1772 _cm_satb_cl(task, g1h),
1773 _cm_cl(g1h, task),
1774 _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
1775 _thread_parity(Threads::thread_claim_parity()) {}
1776
1777 void do_thread(Thread* thread) {
1778 if (thread->is_Java_thread()) {
1779 if (thread->claim_oops_do(true, _thread_parity)) {
1780 JavaThread* jt = (JavaThread*)thread;
1781
1782         // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking,
1783         // however oops reachable from nmethods have very complex lifecycles:
1784         // * Alive if on the stack of an executing method
1785         // * Weakly reachable otherwise
1786         // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
1787         // live by the SATB invariant but other oops recorded in nmethods may behave differently.
1788 jt->nmethods_do(&_code_cl);
1789
1790 G1ThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(&_cm_satb_cl);
1791 }
1792 } else if (thread->is_VM_thread()) {
1793 if (thread->claim_oops_do(true, _thread_parity)) {
1794 G1BarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
1795 }
1796 }
1797 }
1798 };
1799
1800 class G1CMRemarkTask : public AbstractGangTask {
1801 G1ConcurrentMark* _cm;
1802 public:
1803 void work(uint worker_id) {
1804 G1CMTask* task = _cm->task(worker_id);
1805 task->record_start_time();
1806 {
1807 ResourceMark rm;
1808 HandleMark hm;
1809
1810 G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
1811 Threads::threads_do(&threads_f);
1812 }
1813
1814 do {
1815 task->do_marking_step(1000000000.0 /* something very large */,
1816 true /* do_termination */,
1817 false /* is_serial */);
1818 } while (task->has_aborted() && !_cm->has_overflown());
1819 // If we overflow, then we do not want to restart. We instead
1820 // want to abort remark and do concurrent marking again.
1821 task->record_end_time();
1822 }
1823
1824 G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :
1825 AbstractGangTask("Par Remark"), _cm(cm) {
1826 _cm->terminator()->reset_for_reuse(active_workers);
1827 }
1828 };
1829
1830 void G1ConcurrentMark::finalize_marking() {
1831 ResourceMark rm;
1832 HandleMark hm;
1833
1834 _g1h->ensure_parsability(false);
1835
1836   // This is remark, so we'll use all active threads.
1837 uint active_workers = _g1h->workers()->active_workers();
1838 set_concurrency_and_phase(active_workers, false /* concurrent */);
1839   // Leave _parallel_marking_threads at its
1840 // value originally calculated in the G1ConcurrentMark
1841 // constructor and pass values of the active workers
1842 // through the gang in the task.
1843
1844 {
1845 StrongRootsScope srs(active_workers);
1846
1847 G1CMRemarkTask remarkTask(this, active_workers);
1848 // We will start all available threads, even if we decide that the
1849 // active_workers will be fewer. The extra ones will just bail out
1850 // immediately.
1851 _g1h->workers()->run_task(&remarkTask);
1852 }
1853
1854 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
1855 guarantee(has_overflown() ||
1856 satb_mq_set.completed_buffers_num() == 0,
1857 "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
1858 BOOL_TO_STR(has_overflown()),
1859 satb_mq_set.completed_buffers_num());
1860
1861 print_stats();
1862 }
1863
1864 void G1ConcurrentMark::flush_all_task_caches() {
1865 size_t hits = 0;
1866 size_t misses = 0;
1867 for (uint i = 0; i < _max_num_tasks; i++) {
1868 Pair<size_t, size_t> stats = _tasks[i]->flush_mark_stats_cache();
1869 hits += stats.first;
1870 misses += stats.second;
1871 }
1872 size_t sum = hits + misses;
1873 log_debug(gc, stats)("Mark stats cache hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %1.3lf",
1874 hits, misses, percent_of(hits, sum));
1875 }
1876
1877 void G1ConcurrentMark::clear_range_in_prev_bitmap(MemRegion mr) {
1878 _prev_mark_bitmap->clear_range(mr);
1879 }
1880
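// Claims the next region to scan by advancing the global finger with a CAS.
// Informal sketch of the race: two workers may both read the same finger
// value F; only the one whose cmpxchg(end, &_finger, F) succeeds owns
// [F, end), while the loser observes that _finger has moved, re-reads it
// and retries from the new value.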
1881 HeapRegion*
1882 G1ConcurrentMark::claim_region(uint worker_id) {
1883 // "checkpoint" the finger
1884 HeapWord* finger = _finger;
1885
1886 while (finger < _heap.end()) {
1887 assert(_g1h->is_in_g1_reserved(finger), "invariant");
1888
1889 HeapRegion* curr_region = _g1h->heap_region_containing(finger);
1890 // Make sure that the reads below do not float before loading curr_region.
1891 OrderAccess::loadload();
1892     // Above, heap_region_containing may return NULL as we always scan and
1893     // claim regions until the end of the heap. In this case, just jump to the next region.
1894 HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
1895
1896 // Is the gap between reading the finger and doing the CAS too long?
1897 HeapWord* res = Atomic::cmpxchg(end, &_finger, finger);
1898 if (res == finger && curr_region != NULL) {
1899 // we succeeded
1900 HeapWord* bottom = curr_region->bottom();
1901 HeapWord* limit = curr_region->next_top_at_mark_start();
1902
1903       // notice that _finger == end cannot be guaranteed here since
1904       // someone else might have moved the finger even further
1905 assert(_finger >= end, "the finger should have moved forward");
1906
1907 if (limit > bottom) {
1908 return curr_region;
1909 } else {
1910 assert(limit == bottom,
1911 "the region limit should be at bottom");
1912 // we return NULL and the caller should try calling
1913 // claim_region() again.
1914 return NULL;
1915 }
1916 } else {
1917 assert(_finger > finger, "the finger should have moved forward");
1918 // read it again
1919 finger = _finger;
1920 }
1921 }
1922
1923 return NULL;
1924 }
1925
1926 #ifndef PRODUCT
1927 class VerifyNoCSetOops {
1928 G1CollectedHeap* _g1h;
1929 const char* _phase;
1930 int _info;
1931
1932 public:
1933 VerifyNoCSetOops(const char* phase, int info = -1) :
1934 _g1h(G1CollectedHeap::heap()),
1935 _phase(phase),
1936 _info(info)
1937 { }
1938
1939 void operator()(G1TaskQueueEntry task_entry) const {
1940 if (task_entry.is_array_slice()) {
1941 guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice()));
1942 return;
1943 }
1944 guarantee(oopDesc::is_oop(task_entry.obj()),
1945 "Non-oop " PTR_FORMAT ", phase: %s, info: %d",
1946 p2i(task_entry.obj()), _phase, _info);
1947 guarantee(!_g1h->is_in_cset(task_entry.obj()),
1948 "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d",
1949 p2i(task_entry.obj()), _phase, _info);
1950 }
1951 };
1952
1953 void G1ConcurrentMark::verify_no_cset_oops() {
1954 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
1955 if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
1956 return;
1957 }
1958
1959 // Verify entries on the global mark stack
1960 _global_mark_stack.iterate(VerifyNoCSetOops("Stack"));
1961
1962 // Verify entries on the task queues
1963 for (uint i = 0; i < _max_num_tasks; ++i) {
1964 G1CMTaskQueue* queue = _task_queues->queue(i);
1965 queue->iterate(VerifyNoCSetOops("Queue", i));
1966 }
1967
1968 // Verify the global finger
1969 HeapWord* global_finger = finger();
1970 if (global_finger != NULL && global_finger < _heap.end()) {
1971 // Since we always iterate over all regions, we might get a NULL HeapRegion
1972 // here.
1973 HeapRegion* global_hr = _g1h->heap_region_containing(global_finger);
1974 guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
1975 "global finger: " PTR_FORMAT " region: " HR_FORMAT,
1976 p2i(global_finger), HR_FORMAT_PARAMS(global_hr));
1977 }
1978
1979 // Verify the task fingers
1980 assert(_num_concurrent_workers <= _max_num_tasks, "sanity");
1981 for (uint i = 0; i < _num_concurrent_workers; ++i) {
1982 G1CMTask* task = _tasks[i];
1983 HeapWord* task_finger = task->finger();
1984 if (task_finger != NULL && task_finger < _heap.end()) {
1985 // See above note on the global finger verification.
1986 HeapRegion* task_hr = _g1h->heap_region_containing(task_finger);
1987 guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
1988 !task_hr->in_collection_set(),
1989 "task finger: " PTR_FORMAT " region: " HR_FORMAT,
1990 p2i(task_finger), HR_FORMAT_PARAMS(task_hr));
1991 }
1992 }
1993 }
1994 #endif // PRODUCT
1995
1996 void G1ConcurrentMark::rebuild_rem_set_concurrently() {
1997 _g1h->g1_rem_set()->rebuild_rem_set(this, _concurrent_workers, _worker_id_offset);
1998 }
1999
2000 void G1ConcurrentMark::print_stats() {
2001 if (!log_is_enabled(Debug, gc, stats)) {
2002 return;
2003 }
2004 log_debug(gc, stats)("---------------------------------------------------------------------");
2005 for (size_t i = 0; i < _num_active_tasks; ++i) {
2006 _tasks[i]->print_stats();
2007 log_debug(gc, stats)("---------------------------------------------------------------------");
2008 }
2009 }
2010
2011 void G1ConcurrentMark::concurrent_cycle_abort() {
2012 if (!cm_thread()->during_cycle() || _has_aborted) {
2013 // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
2014 return;
2015 }
2016
2017 // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
2018 // concurrent bitmap clearing.
2019 {
2020 GCTraceTime(Debug, gc) debug("Clear Next Bitmap");
2021 clear_bitmap(_next_mark_bitmap, _g1h->workers(), false);
2022 }
2023 // Note we cannot clear the previous marking bitmap here
2024 // since VerifyDuringGC verifies the objects marked during
2025 // a full GC against the previous bitmap.
2026
2027 // Empty mark stack
2028 reset_marking_for_restart();
2029 for (uint i = 0; i < _max_num_tasks; ++i) {
2030 _tasks[i]->clear_region_fields();
2031 }
2032 _first_overflow_barrier_sync.abort();
2033 _second_overflow_barrier_sync.abort();
2034 _has_aborted = true;
2035
2036 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2037 satb_mq_set.abandon_partial_marking();
2038   // This can be called either during or outside marking; we'll read
2039 // the expected_active value from the SATB queue set.
2040 satb_mq_set.set_active_all_threads(
2041 false, /* new active value */
2042 satb_mq_set.is_active() /* expected_active */);
2043 }
2044
2045 static void print_ms_time_info(const char* prefix, const char* name,
2046 NumberSeq& ns) {
2047 log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
2048 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
2049 if (ns.num() > 0) {
2050 log_trace(gc, marking)("%s [std. dev = %8.2f ms, max = %8.2f ms]",
2051 prefix, ns.sd(), ns.maximum());
2052 }
2053 }
2054
2055 void G1ConcurrentMark::print_summary_info() {
2056 Log(gc, marking) log;
2057 if (!log.is_trace()) {
2058 return;
2059 }
2060
2061 log.trace(" Concurrent marking:");
2062 print_ms_time_info(" ", "init marks", _init_times);
2063 print_ms_time_info(" ", "remarks", _remark_times);
2064 {
2065 print_ms_time_info(" ", "final marks", _remark_mark_times);
2066 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times);
2067
2068 }
2069 print_ms_time_info(" ", "cleanups", _cleanup_times);
2070 log.trace(" Finalize live data total time = %8.2f s (avg = %8.2f ms).",
2071 _total_cleanup_time, (_cleanup_times.num() > 0 ? _total_cleanup_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
2072 log.trace(" Total stop_world time = %8.2f s.",
2073 (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0);
2074 log.trace(" Total concurrent time = %8.2f s (%8.2f s marking).",
2075 cm_thread()->vtime_accum(), cm_thread()->vtime_mark_accum());
2076 }
2077
2078 void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const {
2079 _concurrent_workers->print_worker_threads_on(st);
2080 }
2081
2082 void G1ConcurrentMark::threads_do(ThreadClosure* tc) const {
2083 _concurrent_workers->threads_do(tc);
2084 }
2085
2086 void G1ConcurrentMark::print_on_error(outputStream* st) const {
2087 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
2088 p2i(_prev_mark_bitmap), p2i(_next_mark_bitmap));
2089 _prev_mark_bitmap->print_on_error(st, " Prev Bits: ");
2090 _next_mark_bitmap->print_on_error(st, " Next Bits: ");
2091 }
2092
2093 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
2094 ReferenceProcessor* result = g1h->ref_processor_cm();
2095 assert(result != NULL, "CM reference processor should not be NULL");
2096 return result;
2097 }
2098
2099 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
2100 G1CMTask* task)
2101 : MetadataVisitingOopIterateClosure(get_cm_oop_closure_ref_processor(g1h)),
2102 _g1h(g1h), _task(task)
2103 { }
2104
2105 void G1CMTask::setup_for_region(HeapRegion* hr) {
2106 assert(hr != NULL,
2107 "claim_region() should have filtered out NULL regions");
2108 _curr_region = hr;
2109 _finger = hr->bottom();
2110 update_region_limit();
2111 }
2112
2113 void G1CMTask::update_region_limit() {
2114 HeapRegion* hr = _curr_region;
2115 HeapWord* bottom = hr->bottom();
2116 HeapWord* limit = hr->next_top_at_mark_start();
2117
2118 if (limit == bottom) {
2119 // The region was collected underneath our feet.
2120 // We set the finger to bottom to ensure that the bitmap
2121 // iteration that will follow this will not do anything.
2122 // (this is not a condition that holds when we set the region up,
2123 // as the region is not supposed to be empty in the first place)
2124 _finger = bottom;
2125 } else if (limit >= _region_limit) {
2126 assert(limit >= _finger, "peace of mind");
2127 } else {
2128 assert(limit < _region_limit, "only way to get here");
2129 // This can happen under some pretty unusual circumstances. An
2130 // evacuation pause empties the region underneath our feet (NTAMS
2131 // at bottom). We then do some allocation in the region (NTAMS
2132 // stays at bottom), followed by the region being used as a GC
2133 // alloc region (NTAMS will move to top() and the objects
2134 // originally below it will be grayed). All objects now marked in
2135 // the region are explicitly grayed, if below the global finger,
2136     // and in fact we do not need to scan anything else. So, we simply
2137 // set _finger to be limit to ensure that the bitmap iteration
2138 // doesn't do anything.
2139 _finger = limit;
2140 }
2141
2142 _region_limit = limit;
2143 }
2144
2145 void G1CMTask::giveup_current_region() {
2146 assert(_curr_region != NULL, "invariant");
2147 clear_region_fields();
2148 }
2149
2150 void G1CMTask::clear_region_fields() {
2151   // Set these three fields to values that indicate that we're not
2152   // holding on to a region.
2153 _curr_region = NULL;
2154 _finger = NULL;
2155 _region_limit = NULL;
2156 }
2157
2158 void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
2159 if (cm_oop_closure == NULL) {
2160 assert(_cm_oop_closure != NULL, "invariant");
2161 } else {
2162 assert(_cm_oop_closure == NULL, "invariant");
2163 }
2164 _cm_oop_closure = cm_oop_closure;
2165 }
2166
2167 void G1CMTask::reset(G1CMBitMap* next_mark_bitmap) {
2168 guarantee(next_mark_bitmap != NULL, "invariant");
2169 _next_mark_bitmap = next_mark_bitmap;
2170 clear_region_fields();
2171
2172 _calls = 0;
2173 _elapsed_time_ms = 0.0;
2174 _termination_time_ms = 0.0;
2175 _termination_start_time_ms = 0.0;
2176
2177 _mark_stats_cache.reset();
2178 }
2179
2180 bool G1CMTask::should_exit_termination() {
2181 regular_clock_call();
2182 // This is called when we are in the termination protocol. We should
2183 // quit if, for some reason, this task wants to abort or the global
2184 // stack is not empty (this means that we can get work from it).
2185 return !_cm->mark_stack_empty() || has_aborted();
2186 }
2187
2188 void G1CMTask::reached_limit() {
2189 assert(_words_scanned >= _words_scanned_limit ||
2190          _refs_reached >= _refs_reached_limit,
2191 "shouldn't have been called otherwise");
2192 regular_clock_call();
2193 }
2194
2195 void G1CMTask::regular_clock_call() {
2196 if (has_aborted()) {
2197 return;
2198 }
2199
2200 // First, we need to recalculate the words scanned and refs reached
2201 // limits for the next clock call.
2202 recalculate_limits();
2203
2204   // During the regular clock call we do the following:
2205
2206 // (1) If an overflow has been flagged, then we abort.
2207 if (_cm->has_overflown()) {
2208 set_has_aborted();
2209 return;
2210 }
2211
2212 // If we are not concurrent (i.e. we're doing remark) we don't need
2213 // to check anything else. The other steps are only needed during
2214 // the concurrent marking phase.
2215 if (!_cm->concurrent()) {
2216 return;
2217 }
2218
2219 // (2) If marking has been aborted for Full GC, then we also abort.
2220 if (_cm->has_aborted()) {
2221 set_has_aborted();
2222 return;
2223 }
2224
2225 double curr_time_ms = os::elapsedVTime() * 1000.0;
2226
2227   // (3) We check whether we should yield. If we have to, then we abort.
2228 if (SuspendibleThreadSet::should_yield()) {
2229 // We should yield. To do this we abort the task. The caller is
2230 // responsible for yielding.
2231 set_has_aborted();
2232 return;
2233 }
2234
2235   // (4) We check whether we've reached our time quota. If we have,
2236 // then we abort.
2237 double elapsed_time_ms = curr_time_ms - _start_time_ms;
2238 if (elapsed_time_ms > _time_target_ms) {
2239 set_has_aborted();
2240 _has_timed_out = true;
2241 return;
2242 }
2243
2244   // (5) Finally, we check whether there are enough completed SATB
2245 // buffers available for processing. If there are, we abort.
2246 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2247 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
2248 // we do need to process SATB buffers, we'll abort and restart
2249 // the marking task to do so
2250 set_has_aborted();
2251 return;
2252 }
2253 }
2254
2255 void G1CMTask::recalculate_limits() {
2256 _real_words_scanned_limit = _words_scanned + words_scanned_period;
2257 _words_scanned_limit = _real_words_scanned_limit;
2258
2259 _real_refs_reached_limit = _refs_reached + refs_reached_period;
2260 _refs_reached_limit = _real_refs_reached_limit;
2261 }
2262
2263 void G1CMTask::decrease_limits() {
2264 // This is called when we believe that we're going to do an infrequent
2265 // operation which will increase the per byte scanned cost (i.e. move
2266 // entries to/from the global stack). It basically tries to decrease the
2267 // scanning limit so that the clock is called earlier.
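  // For example: right after recalculate_limits(), the clock would normally
  // fire after scanning another words_scanned_period words; after this
  // adjustment it fires after only about a quarter of that period.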
2268
2269 _words_scanned_limit = _real_words_scanned_limit - 3 * words_scanned_period / 4;
2270 _refs_reached_limit = _real_refs_reached_limit - 3 * refs_reached_period / 4;
2271 }
2272
2273 void G1CMTask::move_entries_to_global_stack() {
2274 // Local array where we'll store the entries that will be popped
2275 // from the local queue.
2276 G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2277
2278 size_t n = 0;
2279 G1TaskQueueEntry task_entry;
2280 while (n < G1CMMarkStack::EntriesPerChunk && _task_queue->pop_local(task_entry)) {
2281 buffer[n] = task_entry;
2282 ++n;
2283 }
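  // Terminate a short chunk with an empty entry; the consumer in
  // get_entries_from_global_stack() stops at the first null entry.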
2284 if (n < G1CMMarkStack::EntriesPerChunk) {
2285 buffer[n] = G1TaskQueueEntry();
2286 }
2287
2288 if (n > 0) {
2289 if (!_cm->mark_stack_push(buffer)) {
2290 set_has_aborted();
2291 }
2292 }
2293
2294 // This operation was quite expensive, so decrease the limits.
2295 decrease_limits();
2296 }
2297
2298 bool G1CMTask::get_entries_from_global_stack() {
2299 // Local array where we'll store the entries that will be popped
2300 // from the global stack.
2301 G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2302
2303 if (!_cm->mark_stack_pop(buffer)) {
2304 return false;
2305 }
2306
2307 // We did actually pop at least one entry.
2308 for (size_t i = 0; i < G1CMMarkStack::EntriesPerChunk; ++i) {
2309 G1TaskQueueEntry task_entry = buffer[i];
2310 if (task_entry.is_null()) {
2311 break;
2312 }
2313 assert(task_entry.is_array_slice() || oopDesc::is_oop(task_entry.obj()), "Element " PTR_FORMAT " must be an array slice or oop", p2i(task_entry.obj()));
2314 bool success = _task_queue->push(task_entry);
2315 // We only call this when the local queue is empty or under a
2316 // given target limit. So, we do not expect this push to fail.
2317 assert(success, "invariant");
2318 }
2319
2320 // This operation was quite expensive, so decrease the limits
2321 decrease_limits();
2322 return true;
2323 }
2324
2325 void G1CMTask::drain_local_queue(bool partially) {
2326 if (has_aborted()) {
2327 return;
2328 }
2329
2330   // Decide what the target size is, depending on whether we're going to
2331   // drain it partially (so that other tasks can steal if they run out
2332   // of things to do) or totally (at the very end).
2333 size_t target_size;
2334 if (partially) {
2335 target_size = MIN2((size_t)_task_queue->max_elems()/3, (size_t)GCDrainStackTargetSize);
2336 } else {
2337 target_size = 0;
2338 }
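  // For example, with a GCDrainStackTargetSize of 64 (assuming the usual
  // product default), a partial drain stops once at most 64 entries (or a
  // third of the queue's capacity, if that is smaller) remain available
  // for stealing.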
2339
2340 if (_task_queue->size() > target_size) {
2341 G1TaskQueueEntry entry;
2342 bool ret = _task_queue->pop_local(entry);
2343 while (ret) {
2344 scan_task_entry(entry);
2345 if (_task_queue->size() <= target_size || has_aborted()) {
2346 ret = false;
2347 } else {
2348 ret = _task_queue->pop_local(entry);
2349 }
2350 }
2351 }
2352 }
2353
2354 void G1CMTask::drain_global_stack(bool partially) {
2355 if (has_aborted()) {
2356 return;
2357 }
2358
2359 // We have a policy to drain the local queue before we attempt to
2360 // drain the global stack.
2361 assert(partially || _task_queue->size() == 0, "invariant");
2362
2363   // Decide what the target size is, depending on whether we're going to
2364 // drain it partially (so that other tasks can steal if they run out
2365 // of things to do) or totally (at the very end).
2366   // Notice that when draining the global mark stack partially, due to the raciness
2367 // of the mark stack size update we might in fact drop below the target. But,
2368 // this is not a problem.
2369 // In case of total draining, we simply process until the global mark stack is
2370 // totally empty, disregarding the size counter.
2371 if (partially) {
2372 size_t const target_size = _cm->partial_mark_stack_size_target();
2373 while (!has_aborted() && _cm->mark_stack_size() > target_size) {
2374 if (get_entries_from_global_stack()) {
2375 drain_local_queue(partially);
2376 }
2377 }
2378 } else {
2379 while (!has_aborted() && get_entries_from_global_stack()) {
2380 drain_local_queue(partially);
2381 }
2382 }
2383 }
2384
2385 // The SATB queue code has several assumptions on whether to call the par or
2386 // non-par versions of its methods. This is why some of the code is
2387 // replicated. We should really get rid of the single-threaded version
2388 // of the code to simplify things.
2389 void G1CMTask::drain_satb_buffers() {
2390 if (has_aborted()) {
2391 return;
2392 }
2393
2394 // We set this so that the regular clock knows that we're in the
2395 // middle of draining buffers and doesn't set the abort flag when it
2396 // notices that SATB buffers are available for draining. It'd be
2397   // very counterproductive if it did that. :-)
2398 _draining_satb_buffers = true;
2399
2400 G1CMSATBBufferClosure satb_cl(this, _g1h);
2401 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2402
2403 // This keeps claiming and applying the closure to completed buffers
2404 // until we run out of buffers or we need to abort.
2405 while (!has_aborted() &&
2406 satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
2407 regular_clock_call();
2408 }
2409
2410 _draining_satb_buffers = false;
2411
2412 assert(has_aborted() ||
2413 _cm->concurrent() ||
2414 satb_mq_set.completed_buffers_num() == 0, "invariant");
2415
2416   // Again, this was a potentially expensive operation; decrease the
2417   // limits to get the regular clock called early.
2418 decrease_limits();
2419 }
2420
2421 void G1CMTask::clear_mark_stats_cache(uint region_idx) {
2422 _mark_stats_cache.reset(region_idx);
2423 }
2424
2425 Pair<size_t, size_t> G1CMTask::flush_mark_stats_cache() {
2426 return _mark_stats_cache.evict_all();
2427 }
2428
2429 void G1CMTask::print_stats() {
2430 log_debug(gc, stats)("Marking Stats, task = %u, calls = %u", _worker_id, _calls);
2431 log_debug(gc, stats)(" Elapsed time = %1.2lfms, Termination time = %1.2lfms",
2432 _elapsed_time_ms, _termination_time_ms);
2433 log_debug(gc, stats)(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms max = %1.2lfms, total = %1.2lfms",
2434 _step_times_ms.num(),
2435 _step_times_ms.avg(),
2436 _step_times_ms.sd(),
2437 _step_times_ms.maximum(),
2438 _step_times_ms.sum());
2439 size_t const hits = _mark_stats_cache.hits();
2440 size_t const misses = _mark_stats_cache.misses();
2441 log_debug(gc, stats)(" Mark Stats Cache: hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %.3f",
2442 hits, misses, percent_of(hits, hits + misses));
2443 }
2444
2445 bool G1ConcurrentMark::try_stealing(uint worker_id, G1TaskQueueEntry& task_entry) {
2446 return _task_queues->steal(worker_id, task_entry);
2447 }
2448
2449 /*****************************************************************************
2450
2451 The do_marking_step(time_target_ms, ...) method is the building
2452 block of the parallel marking framework. It can be called in parallel
2453 with other invocations of do_marking_step() on different tasks
2454 (but only one per task, obviously) and concurrently with the
2455 mutator threads, or during remark, hence it eliminates the need
2456 for two versions of the code. When called during remark, it will
2457 pick up from where the task left off during the concurrent marking
2458    phase. Interestingly, tasks are also claimable during evacuation
2459    pauses, since do_marking_step() ensures that it aborts before
2460 it needs to yield.
2461
2462 The data structures that it uses to do marking work are the
2463 following:
2464
2465 (1) Marking Bitmap. If there are gray objects that appear only
2466 on the bitmap (this happens either when dealing with an overflow
2467 or when the initial marking phase has simply marked the roots
2468 and didn't push them on the stack), then tasks claim heap
2469 regions whose bitmap they then scan to find gray objects. A
2470 global finger indicates where the end of the last claimed region
2471 is. A local finger indicates how far into the region a task has
2472 scanned. The two fingers are used to determine how to gray an
2473 object (i.e. whether simply marking it is OK, as it will be
2474 visited by a task in the future, or whether it needs to be also
2475 pushed on a stack).
2476
2477 (2) Local Queue. The local queue of the task which is accessed
2478 reasonably efficiently by the task. Other tasks can steal from
2479 it when they run out of work. Throughout the marking phase, a
2480 task attempts to keep its local queue short but not totally
2481 empty, so that entries are available for stealing by other
2482    tasks. Only when there is no more work will a task totally
2483    drain its local queue.
2484
2485 (3) Global Mark Stack. This handles local queue overflow. During
2486 marking only sets of entries are moved between it and the local
2487    queues, as access to it requires a mutex, and more fine-grained
2488    interaction with it might cause contention. If it
2489 overflows, then the marking phase should restart and iterate
2490 over the bitmap to identify gray objects. Throughout the marking
2491 phase, tasks attempt to keep the global mark stack at a small
2492 length but not totally empty, so that entries are available for
2493    popping by other tasks. Only when there is no more work will
2494    tasks totally drain the global mark stack.
2495
2496 (4) SATB Buffer Queue. This is where completed SATB buffers are
2497 made available. Buffers are regularly removed from this queue
2498 and scanned for roots, so that the queue doesn't get too
2499 long. During remark, all completed buffers are processed, as
2500    well as the filled-in parts of any uncompleted buffers.
2501
2502 The do_marking_step() method tries to abort when the time target
2503 has been reached. There are a few other cases when the
2504 do_marking_step() method also aborts:
2505
2506 (1) When the marking phase has been aborted (after a Full GC).
2507
2508 (2) When a global overflow (on the global stack) has been
2509 triggered. Before the task aborts, it will actually sync up with
2510 the other tasks to ensure that all the marking data structures
2511 (local queues, stacks, fingers etc.) are re-initialized so that
2512 when do_marking_step() completes, the marking phase can
2513 immediately restart.
2514
2515 (3) When enough completed SATB buffers are available. The
2516 do_marking_step() method only tries to drain SATB buffers right
2517 at the beginning. So, if enough buffers are available, the
2518 marking step aborts and the SATB buffers are processed at
2519 the beginning of the next invocation.
2520
2521    (4) To yield. When we have to yield, we abort and do the yield
2522    right at the end of do_marking_step(). This saves us a lot
2523    of hassle since, by yielding, we might allow a Full GC. If this
2524 happens then objects will be compacted underneath our feet, the
2525 heap might shrink, etc. We save checking for this by just
2526 aborting and doing the yield right at the end.
2527
2528 From the above it follows that the do_marking_step() method should
2529 be called in a loop (or, otherwise, regularly) until it completes.
2530
2531 If a marking step completes without its has_aborted() flag being
2532 true, it means it has completed the current marking phase (and
2533 also all other marking tasks have done so and have all synced up).
2534
2535 A method called regular_clock_call() is invoked "regularly" (in
2536    sub-millisecond intervals) throughout marking. It is this clock method that
2537 checks all the abort conditions which were mentioned above and
2538 decides when the task should abort. A work-based scheme is used to
2539 trigger this clock method: when the number of object words the
2540 marking phase has scanned or the number of references the marking
2541    phase has visited reaches a given limit. Additional invocations of
2542    the clock method have been planted in a few other strategic places
2543    too. The initial reason for the clock method was to avoid calling
2544    vtime too frequently, as it is quite expensive. So, once it was in
2545 place, it was natural to piggy-back all the other conditions on it
2546 too and not constantly check them throughout the code.
2547
2548 If do_termination is true then do_marking_step will enter its
2549 termination protocol.
2550
2551 The value of is_serial must be true when do_marking_step is being
2552 called serially (i.e. by the VMThread) and do_marking_step should
2553 skip any synchronization in the termination and overflow code.
2554 Examples include the serial remark code and the serial reference
2555 processing closures.
2556
2557 The value of is_serial must be false when do_marking_step is
2558 being called by any of the worker threads in a work gang.
2559 Examples include the concurrent marking code (CMMarkingTask),
2560 the MT remark code, and the MT reference processing closures.
2561
2562 *****************************************************************************/
2563
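// A typical caller loop, as used e.g. in G1CMDrainMarkingStackClosure::do_void()
// and G1CMRemarkTask::work() above, looks like:
//
//   do {
//     task->do_marking_step(target_ms, true /* do_termination */, is_serial);
//   } while (task->has_aborted() && !cm->has_overflown());
//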
2564 void G1CMTask::do_marking_step(double time_target_ms,
2565 bool do_termination,
2566 bool is_serial) {
2567 assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
2568
2569 _start_time_ms = os::elapsedVTime() * 1000.0;
2570
2571 // If do_stealing is true then do_marking_step will attempt to
2572 // steal work from the other G1CMTasks. It only makes sense to
2573 // enable stealing when the termination protocol is enabled
2574 // and do_marking_step() is not being called serially.
2575 bool do_stealing = do_termination && !is_serial;
2576
2577 double diff_prediction_ms = _g1h->g1_policy()->predictor().get_new_prediction(&_marking_step_diffs_ms);
2578 _time_target_ms = time_target_ms - diff_prediction_ms;
2579
2580 // set up the variables that are used in the work-based scheme to
2581 // call the regular clock method
2582 _words_scanned = 0;
2583 _refs_reached = 0;
2584 recalculate_limits();
2585
2586 // clear all flags
2587 clear_has_aborted();
2588 _has_timed_out = false;
2589 _draining_satb_buffers = false;
2590
2591 ++_calls;
2592
2593   // Set up the bitmap and oop closures. Anything that uses them is
2594   // eventually called from this method, so it is OK to allocate these
2595   // on this frame's stack.
2596 G1CMBitMapClosure bitmap_closure(this, _cm);
2597 G1CMOopClosure cm_oop_closure(_g1h, this);
2598 set_cm_oop_closure(&cm_oop_closure);
2599
2600 if (_cm->has_overflown()) {
2601 // This can happen if the mark stack overflows during a GC pause
2602 // and this task, after a yield point, restarts. We have to abort
2603 // as we need to get into the overflow protocol which happens
2604 // right at the end of this task.
2605 set_has_aborted();
2606 }
2607
2608 // First drain any available SATB buffers. After this, we will not
2609 // look at SATB buffers before the next invocation of this method.
2610 // If enough completed SATB buffers are queued up, the regular clock
2611 // will abort this task so that it restarts.
2612 drain_satb_buffers();
2613 // ...then partially drain the local queue and the global stack
2614 drain_local_queue(true);
2615 drain_global_stack(true);
2616
2617 do {
2618 if (!has_aborted() && _curr_region != NULL) {
2619 // This means that we're already holding on to a region.
2620 assert(_finger != NULL, "if region is not NULL, then the finger "
2621 "should not be NULL either");
2622
2623 // We might have restarted this task after an evacuation pause
2624 // which might have evacuated the region we're holding on to
2625 // underneath our feet. Let's read its limit again to make sure
2626 // that we do not iterate over a region of the heap that
2627 // contains garbage (update_region_limit() will also move
2628 // _finger to the start of the region if it is found empty).
2629 update_region_limit();
2630 // We will start from _finger not from the start of the region,
2631 // as we might be restarting this task after aborting half-way
2632 // through scanning this region. In this case, _finger points to
2633 // the address where we last found a marked object. If this is a
2634 // fresh region, _finger points to start().
2635 MemRegion mr = MemRegion(_finger, _region_limit);
2636
2637 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
2638 "humongous regions should go around loop once only");
2639
2640 // Some special cases:
2641 // If the memory region is empty, we can just give up the region.
2642 // If the current region is humongous then we only need to check
2643 // the bitmap for the bit associated with the start of the object,
2644 // scan the object if it's live, and give up the region.
2645 // Otherwise, let's iterate over the bitmap of the part of the region
2646 // that is left.
2647 // If the iteration is successful, give up the region.
2648 if (mr.is_empty()) {
2649 giveup_current_region();
2650 regular_clock_call();
2651 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
2652 if (_next_mark_bitmap->is_marked(mr.start())) {
2653 // The object is marked - apply the closure
2654 bitmap_closure.do_addr(mr.start());
2655 }
2656 // Even if this task aborted while scanning the humongous object
2657 // we can (and should) give up the current region.
2658 giveup_current_region();
2659 regular_clock_call();
2660 } else if (_next_mark_bitmap->iterate(&bitmap_closure, mr)) {
2661 giveup_current_region();
2662 regular_clock_call();
2663 } else {
2664 assert(has_aborted(), "currently the only way to do so");
2665 // The only way to abort the bitmap iteration is to return
2666         // false from the do_addr() method. However, inside the
2667         // do_addr() method we move the _finger to point to the
2668 // object currently being looked at. So, if we bail out, we
2669 // have definitely set _finger to something non-null.
2670 assert(_finger != NULL, "invariant");
2671
2672 // Region iteration was actually aborted. So now _finger
2673 // points to the address of the object we last scanned. If we
2674 // leave it there, when we restart this task, we will rescan
2675 // the object. It is easy to avoid this. We move the finger by
2676 // enough to point to the next possible object header.
2677 assert(_finger < _region_limit, "invariant");
2678 HeapWord* const new_finger = _finger + ((oop)_finger)->size();
2679 // Check if bitmap iteration was aborted while scanning the last object
2680 if (new_finger >= _region_limit) {
2681 giveup_current_region();
2682 } else {
2683 move_finger_to(new_finger);
2684 }
2685 }
2686 }
2687 // At this point we have either completed iterating over the
2688 // region we were holding on to, or we have aborted.
2689
2690 // We then partially drain the local queue and the global stack.
2691 // (Do we really need this?)
2692 drain_local_queue(true);
2693 drain_global_stack(true);
2694
2695 // Read the note on the claim_region() method on why it might
2696 // return NULL with potentially more regions available for
2697 // claiming and why we have to check out_of_regions() to determine
2698 // whether we're done or not.
2699 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
2700 // We are going to try to claim a new region. We should have
2701 // given up on the previous one.
2702 // Separated the asserts so that we know which one fires.
2703 assert(_curr_region == NULL, "invariant");
2704 assert(_finger == NULL, "invariant");
2705 assert(_region_limit == NULL, "invariant");
2706 HeapRegion* claimed_region = _cm->claim_region(_worker_id);
2707 if (claimed_region != NULL) {
2708 // Yes, we managed to claim one
2709 setup_for_region(claimed_region);
2710 assert(_curr_region == claimed_region, "invariant");
2711 }
2712 // It is important to call the regular clock here. It might take
2713 // a while to claim a region if, for example, we hit a large
2714 // block of empty regions. So we need to call the regular clock
2715       // method once around the loop to make sure it's called
2716 // frequently enough.
2717 regular_clock_call();
2718 }
2719
2720 if (!has_aborted() && _curr_region == NULL) {
2721 assert(_cm->out_of_regions(),
2722 "at this point we should be out of regions");
2723 }
2724 } while ( _curr_region != NULL && !has_aborted());
2725
2726 if (!has_aborted()) {
2727 // We cannot check whether the global stack is empty, since other
2728 // tasks might be pushing objects to it concurrently.
2729 assert(_cm->out_of_regions(),
2730 "at this point we should be out of regions");
2731 // Try to reduce the number of available SATB buffers so that
2732 // remark has less work to do.
2733 drain_satb_buffers();
2734 }
2735
2736 // Since we've done everything else, we can now totally drain the
2737 // local queue and global stack.
2738 drain_local_queue(false);
2739 drain_global_stack(false);
2740
2741   // Attempt at work stealing from other tasks' queues.
2742 if (do_stealing && !has_aborted()) {
2743 // We have not aborted. This means that we have finished all that
2744 // we could. Let's try to do some stealing...
2745
2746 // We cannot check whether the global stack is empty, since other
2747 // tasks might be pushing objects to it concurrently.
2748 assert(_cm->out_of_regions() && _task_queue->size() == 0,
2749 "only way to reach here");
2750 while (!has_aborted()) {
2751 G1TaskQueueEntry entry;
2752 if (_cm->try_stealing(_worker_id, entry)) {
2753 scan_task_entry(entry);
2754
2755 // And since we're towards the end, let's totally drain the
2756 // local queue and global stack.
2757 drain_local_queue(false);
2758 drain_global_stack(false);
2759 } else {
2760 break;
2761 }
2762 }
2763 }
2764
2765 // We still haven't aborted. Now, let's try to get into the
2766 // termination protocol.
2767 if (do_termination && !has_aborted()) {
2768 // We cannot check whether the global stack is empty, since other
2769 // tasks might be concurrently pushing objects on it.
2770 // Separated the asserts so that we know which one fires.
2771 assert(_cm->out_of_regions(), "only way to reach here");
2772 assert(_task_queue->size() == 0, "only way to reach here");
2773 _termination_start_time_ms = os::elapsedVTime() * 1000.0;
2774
2775 // The G1CMTask class also extends the TerminatorTerminator class,
2776 // hence its should_exit_termination() method will also decide
2777 // whether to exit the termination protocol or not.
2778 bool finished = (is_serial ||
2779 _cm->terminator()->offer_termination(this));
2780 double termination_end_time_ms = os::elapsedVTime() * 1000.0;
2781 _termination_time_ms +=
2782 termination_end_time_ms - _termination_start_time_ms;
2783
2784 if (finished) {
2785 // We're all done.
2786
2787 // We can now guarantee that the global stack is empty, since
2788 // all other tasks have finished. We separated the guarantees so
2789 // that, if a condition is false, we can immediately find out
2790 // which one.
2791 guarantee(_cm->out_of_regions(), "only way to reach here");
2792 guarantee(_cm->mark_stack_empty(), "only way to reach here");
2793 guarantee(_task_queue->size() == 0, "only way to reach here");
2794 guarantee(!_cm->has_overflown(), "only way to reach here");
2795 } else {
2796       // Apparently there's more work to do. Let's abort this task. The
2797       // caller will restart it and we can hopefully find more things to do.
2798 set_has_aborted();
2799 }
2800 }
2801
2802 // Mainly for debugging purposes to make sure that a pointer to the
2803   // closure which was allocated on this frame's stack doesn't
2804 // escape it by accident.
2805 set_cm_oop_closure(NULL);
2806 double end_time_ms = os::elapsedVTime() * 1000.0;
2807 double elapsed_time_ms = end_time_ms - _start_time_ms;
2808 // Update the step history.
2809 _step_times_ms.add(elapsed_time_ms);
2810
2811 if (has_aborted()) {
2812 // The task was aborted for some reason.
2813 if (_has_timed_out) {
2814 double diff_ms = elapsed_time_ms - _time_target_ms;
2815 // Keep statistics of how well we did with respect to hitting
2816 // our target only if we actually timed out (if we aborted for
2817 // other reasons, then the results might get skewed).
2818 _marking_step_diffs_ms.add(diff_ms);
2819 }
2820
2821 if (_cm->has_overflown()) {
2822 // This is the interesting one. We aborted because a global
2823 // overflow was raised. This means we have to restart the
2824 // marking phase and start iterating over regions. However, in
2825 // order to do this we have to make sure that all tasks stop
2826 // what they are doing and re-initialize in a safe manner. We
2827 // will achieve this with the use of two barrier sync points.
2828
2829 if (!is_serial) {
2830 // We only need to enter the sync barrier if being called
2831 // from a parallel context
2832 _cm->enter_first_sync_barrier(_worker_id);
2833
2834 // When we exit this sync barrier we know that all tasks have
2835 // stopped doing marking work. So, it's now safe to
2836 // re-initialize our data structures.
2837 }
2838
2839 clear_region_fields();
2840 flush_mark_stats_cache();
2841
2842 if (!is_serial) {
2843 // If we're executing the concurrent phase of marking, reset the marking
2844 // state; otherwise the marking state is reset after reference processing,
2845 // during the remark pause.
2846 // If we reset here as a result of an overflow during the remark we will
2847 // see assertion failures from any subsequent set_concurrency_and_phase()
2848 // calls.
2849 if (_cm->concurrent() && _worker_id == 0) {
2850 // Worker 0 is responsible for clearing the global data structures because
2851 // of an overflow. During STW we should not clear the overflow flag (in
2852           // G1ConcurrentMark::reset_marking_for_restart()) since we rely on it being true when we exit
2853           // this method to abort the pause and restart concurrent marking.
2854 _cm->reset_marking_for_restart();
2855
2856 log_info(gc, marking)("Concurrent Mark reset for overflow");
2857 }
2858
2859 // ...and enter the second barrier.
2860 _cm->enter_second_sync_barrier(_worker_id);
2861 }
2862 // At this point, if we're during the concurrent phase of
2863 // marking, everything has been re-initialized and we're
2864 // ready to restart.
2865 }
2866 }
2867 }
2868
2869 G1CMTask::G1CMTask(uint worker_id,
2870 G1ConcurrentMark* cm,
2871 G1CMTaskQueue* task_queue,
2872 G1RegionMarkStats* mark_stats,
2873 uint max_regions) :
2874 _objArray_processor(this),
2875 _worker_id(worker_id),
2876 _g1h(G1CollectedHeap::heap()),
2877 _cm(cm),
2878 _next_mark_bitmap(NULL),
2879 _task_queue(task_queue),
2880 _mark_stats_cache(mark_stats, max_regions, RegionMarkStatsCacheSize),
2881 _calls(0),
2882 _time_target_ms(0.0),
2883 _start_time_ms(0.0),
2884 _cm_oop_closure(NULL),
2885 _curr_region(NULL),
2886 _finger(NULL),
2887 _region_limit(NULL),
2888 _words_scanned(0),
2889 _words_scanned_limit(0),
2890 _real_words_scanned_limit(0),
2891 _refs_reached(0),
2892 _refs_reached_limit(0),
2893 _real_refs_reached_limit(0),
2894 _has_aborted(false),
2895 _has_timed_out(false),
2896 _draining_satb_buffers(false),
2897 _step_times_ms(),
2898 _elapsed_time_ms(0.0),
2899 _termination_time_ms(0.0),
2900 _termination_start_time_ms(0.0),
2901 _marking_step_diffs_ms()
2902 {
2903 guarantee(task_queue != NULL, "invariant");
2904
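  // Seed the step time-difference history so that the predictor used by
  // do_marking_step() has an initial sample to work with (0.5 ms is just a
  // starting guess).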
2905 _marking_step_diffs_ms.add(0.5);
2906 }
2907
2908 // These are formatting macros that are used below to ensure
2909 // consistent formatting. The *_H_* versions are used to format the
2910 // header for a particular value and they should be kept consistent
2911 // with the corresponding macro. Also note that most of the macros add
2912 // the necessary white space (as a prefix) which makes them a bit
2913 // easier to compose.
2914
2915 // All the output lines are prefixed with this string to be able to
2916 // identify them easily in a large log file.
2917 #define G1PPRL_LINE_PREFIX "###"
2918
2919 #define G1PPRL_ADDR_BASE_FORMAT " " PTR_FORMAT "-" PTR_FORMAT
2920 #ifdef _LP64
2921 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s"
2922 #else // _LP64
2923 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s"
2924 #endif // _LP64
2925
2926 // For per-region info
2927 #define G1PPRL_TYPE_FORMAT " %-4s"
2928 #define G1PPRL_TYPE_H_FORMAT " %4s"
2929 #define G1PPRL_STATE_FORMAT " %-5s"
2930 #define G1PPRL_STATE_H_FORMAT " %5s"
2931 #define G1PPRL_BYTE_FORMAT " " SIZE_FORMAT_W(9)
2932 #define G1PPRL_BYTE_H_FORMAT " %9s"
2933 #define G1PPRL_DOUBLE_FORMAT " %14.1f"
2934 #define G1PPRL_DOUBLE_H_FORMAT " %14s"
2935
2936 // For summary info
2937 #define G1PPRL_SUM_ADDR_FORMAT(tag) " " tag ":" G1PPRL_ADDR_BASE_FORMAT
2938 #define G1PPRL_SUM_BYTE_FORMAT(tag) " " tag ": " SIZE_FORMAT
2939 #define G1PPRL_SUM_MB_FORMAT(tag) " " tag ": %1.2f MB"
2940 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"
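// For example, G1PPRL_SUM_MB_PERC_FORMAT("used") expands to
// " used: %1.2f MB / %1.2f %%", which is how the "used" entry of the
// summary line in the destructor below is printed.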

G1PrintRegionLivenessInfoClosure::G1PrintRegionLivenessInfoClosure(const char* phase_name) :
  _total_used_bytes(0), _total_capacity_bytes(0),
  _total_prev_live_bytes(0), _total_next_live_bytes(0),
  _total_remset_bytes(0), _total_strong_code_roots_bytes(0)
{
  if (!log_is_enabled(Trace, gc, liveness)) {
    return;
  }

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  MemRegion g1_reserved = g1h->g1_reserved();
  double now = os::elapsedTime();

  // Print the header of the output.
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP"
                          G1PPRL_SUM_ADDR_FORMAT("reserved")
                          G1PPRL_SUM_BYTE_FORMAT("region-size"),
                          p2i(g1_reserved.start()), p2i(g1_reserved.end()),
                          HeapRegion::GrainBytes);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          G1PPRL_TYPE_H_FORMAT
                          G1PPRL_ADDR_BASE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_DOUBLE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_STATE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT,
                          "type", "address-range",
                          "used", "prev-live", "next-live", "gc-eff",
                          "remset", "state", "code-roots");
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          G1PPRL_TYPE_H_FORMAT
                          G1PPRL_ADDR_BASE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_DOUBLE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_STATE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT,
                          "", "",
                          "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
                          "(bytes)", "", "(bytes)");
}

bool G1PrintRegionLivenessInfoClosure::do_heap_region(HeapRegion* r) {
  if (!log_is_enabled(Trace, gc, liveness)) {
    return false;
  }

  const char* type = r->get_type_str();
  HeapWord* bottom = r->bottom();
  HeapWord* end = r->end();
  size_t capacity_bytes = r->capacity();
  size_t used_bytes = r->used();
  size_t prev_live_bytes = r->live_bytes();
  size_t next_live_bytes = r->next_live_bytes();
  double gc_eff = r->gc_efficiency();
  size_t remset_bytes = r->rem_set()->mem_size();
  size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
  const char* remset_type = r->rem_set()->get_short_state_str();

  _total_used_bytes += used_bytes;
  _total_capacity_bytes += capacity_bytes;
  _total_prev_live_bytes += prev_live_bytes;
  _total_next_live_bytes += next_live_bytes;
  _total_remset_bytes += remset_bytes;
  _total_strong_code_roots_bytes += strong_code_roots_bytes;

  // Print a line for this particular region.
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          G1PPRL_TYPE_FORMAT
                          G1PPRL_ADDR_BASE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_DOUBLE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_STATE_FORMAT
                          G1PPRL_BYTE_FORMAT,
                          type, p2i(bottom), p2i(end),
                          used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
                          remset_bytes, remset_type, strong_code_roots_bytes);

  return false;
}
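
// Illustrative usage sketch (the phase name is hypothetical): a caller
// constructs the closure and drives it over every region, e.g.
//
//   G1PrintRegionLivenessInfoClosure cl("Post-Marking");
//   G1CollectedHeap::heap()->heap_region_iterate(&cl);
//
// heap_region_iterate() invokes do_heap_region() once per region;
// returning false above means "continue the iteration", and the
// destructor below prints the summary footer.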

G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
  if (!log_is_enabled(Trace, gc, liveness)) {
    return;
  }

  // Add static memory usage to the remembered set sizes.
  _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
  // Print the footer of the output.
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          " SUMMARY"
                          G1PPRL_SUM_MB_FORMAT("capacity")
                          G1PPRL_SUM_MB_PERC_FORMAT("used")
                          G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
                          G1PPRL_SUM_MB_PERC_FORMAT("next-live")
                          G1PPRL_SUM_MB_FORMAT("remset")
                          G1PPRL_SUM_MB_FORMAT("code-roots"),
                          bytes_to_mb(_total_capacity_bytes),
                          bytes_to_mb(_total_used_bytes),
                          percent_of(_total_used_bytes, _total_capacity_bytes),
                          bytes_to_mb(_total_prev_live_bytes),
                          percent_of(_total_prev_live_bytes, _total_capacity_bytes),
                          bytes_to_mb(_total_next_live_bytes),
                          percent_of(_total_next_live_bytes, _total_capacity_bytes),
                          bytes_to_mb(_total_remset_bytes),
                          bytes_to_mb(_total_strong_code_roots_bytes));
}
--- EOF ---