JDK-8236073 G1: Use SoftMaxHeapSize to guide GC heuristics
--- old/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
+++ new/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
1 1 /*
2 2 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "classfile/classLoaderDataGraph.hpp"
27 27 #include "code/codeCache.hpp"
28 28 #include "gc/g1/g1BarrierSet.hpp"
29 29 #include "gc/g1/g1CollectedHeap.inline.hpp"
30 30 #include "gc/g1/g1CollectorState.hpp"
31 31 #include "gc/g1/g1ConcurrentMark.inline.hpp"
32 32 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
33 33 #include "gc/g1/g1DirtyCardQueue.hpp"
34 34 #include "gc/g1/g1HeapVerifier.hpp"
35 35 #include "gc/g1/g1OopClosures.inline.hpp"
36 36 #include "gc/g1/g1Policy.hpp"
37 37 #include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
38 38 #include "gc/g1/g1StringDedup.hpp"
39 39 #include "gc/g1/g1ThreadLocalData.hpp"
40 40 #include "gc/g1/g1Trace.hpp"
41 41 #include "gc/g1/heapRegion.inline.hpp"
42 42 #include "gc/g1/heapRegionRemSet.hpp"
43 43 #include "gc/g1/heapRegionSet.inline.hpp"
44 44 #include "gc/shared/gcId.hpp"
45 45 #include "gc/shared/gcTimer.hpp"
46 46 #include "gc/shared/gcTraceTime.inline.hpp"
47 47 #include "gc/shared/gcVMOperations.hpp"
48 48 #include "gc/shared/genOopClosures.inline.hpp"
49 49 #include "gc/shared/referencePolicy.hpp"
50 50 #include "gc/shared/strongRootsScope.hpp"
51 51 #include "gc/shared/suspendibleThreadSet.hpp"
52 52 #include "gc/shared/taskqueue.inline.hpp"
53 53 #include "gc/shared/weakProcessor.inline.hpp"
54 54 #include "gc/shared/workerPolicy.hpp"
55 55 #include "include/jvm.h"
56 56 #include "logging/log.hpp"
57 57 #include "memory/allocation.hpp"
58 58 #include "memory/iterator.hpp"
59 59 #include "memory/resourceArea.hpp"
60 60 #include "memory/universe.hpp"
61 61 #include "oops/access.inline.hpp"
62 62 #include "oops/oop.inline.hpp"
63 63 #include "runtime/atomic.hpp"
64 64 #include "runtime/handles.inline.hpp"
65 65 #include "runtime/java.hpp"
66 66 #include "runtime/orderAccess.hpp"
67 67 #include "runtime/prefetch.inline.hpp"
68 68 #include "services/memTracker.hpp"
69 69 #include "utilities/align.hpp"
70 70 #include "utilities/growableArray.hpp"
71 71
72 72 bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
73 73 assert(addr < _cm->finger(), "invariant");
74 74 assert(addr >= _task->finger(), "invariant");
75 75
76 76 // We move the task's local finger along.
77 77 _task->move_finger_to(addr);
78 78
79 79 _task->scan_task_entry(G1TaskQueueEntry::from_oop(oop(addr)));
80 80 // we only partially drain the local queue and global stack
81 81 _task->drain_local_queue(true);
82 82 _task->drain_global_stack(true);
83 83
84 84 // if the has_aborted flag has been raised, we need to bail out of
85 85 // the iteration
86 86 return !_task->has_aborted();
87 87 }
88 88
89 89 G1CMMarkStack::G1CMMarkStack() :
90 90 _max_chunk_capacity(0),
91 91 _base(NULL),
92 92 _chunk_capacity(0) {
93 93 set_empty();
94 94 }
95 95
96 96 bool G1CMMarkStack::resize(size_t new_capacity) {
97 97 assert(is_empty(), "Only resize when stack is empty.");
98 98 assert(new_capacity <= _max_chunk_capacity,
99 99 "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);
100 100
101 101 TaskQueueEntryChunk* new_base = MmapArrayAllocator<TaskQueueEntryChunk>::allocate_or_null(new_capacity, mtGC);
102 102
103 103 if (new_base == NULL) {
104 104 log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(TaskQueueEntryChunk));
105 105 return false;
106 106 }
107 107 // Release old mapping.
108 108 if (_base != NULL) {
109 109 MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
110 110 }
111 111
112 112 _base = new_base;
113 113 _chunk_capacity = new_capacity;
114 114 set_empty();
115 115
116 116 return true;
117 117 }
118 118
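// The alignment, in G1TaskQueueEntry units, that mark stack capacities must
// have: the least common multiple of the OS allocation granularity and the
// chunk size, so that an aligned capacity covers a whole number of chunks
// and of allocation granules.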
119 119 size_t G1CMMarkStack::capacity_alignment() {
120 120 return (size_t)lcm(os::vm_allocation_granularity(), sizeof(TaskQueueEntryChunk)) / sizeof(G1TaskQueueEntry);
121 121 }
122 122
123 123 bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
124 124 guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");
125 125
126 126 size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);
127 127
128 128 _max_chunk_capacity = align_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
129 129 size_t initial_chunk_capacity = align_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
130 130
131 131 guarantee(initial_chunk_capacity <= _max_chunk_capacity,
132 132 "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
133 133 _max_chunk_capacity,
134 134 initial_chunk_capacity);
135 135
136 136 log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
137 137 initial_chunk_capacity, _max_chunk_capacity);
138 138
139 139 return resize(initial_chunk_capacity);
140 140 }
141 141
142 142 void G1CMMarkStack::expand() {
143 143 if (_chunk_capacity == _max_chunk_capacity) {
144 144 log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
145 145 return;
146 146 }
147 147 size_t old_capacity = _chunk_capacity;
148 148 // Double capacity if possible
149 149 size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);
150 150
151 151 if (resize(new_capacity)) {
152 152 log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
153 153 old_capacity, new_capacity);
154 154 } else {
155 155 log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
156 156 old_capacity, new_capacity);
157 157 }
158 158 }
159 159
160 160 G1CMMarkStack::~G1CMMarkStack() {
161 161 if (_base != NULL) {
162 162 MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
163 163 }
164 164 }
165 165
166 166 void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
167 167 elem->next = *list;
168 168 *list = elem;
169 169 }
170 170
171 171 void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
172 172 MutexLocker x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
173 173 add_chunk_to_list(&_chunk_list, elem);
174 174 _chunks_in_chunk_list++;
175 175 }
176 176
177 177 void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
178 178 MutexLocker x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
179 179 add_chunk_to_list(&_free_list, elem);
180 180 }
181 181
182 182 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
183 183 TaskQueueEntryChunk* result = *list;
184 184 if (result != NULL) {
185 185 *list = (*list)->next;
186 186 }
187 187 return result;
188 188 }
189 189
190 190 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
191 191 MutexLocker x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
192 192 TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list);
193 193 if (result != NULL) {
194 194 _chunks_in_chunk_list--;
195 195 }
196 196 return result;
197 197 }
198 198
199 199 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_free_list() {
200 200 MutexLocker x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
201 201 return remove_chunk_from_list(&_free_list);
202 202 }
203 203
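// Allocate a so far unused chunk from the backing memory, bump-pointer style:
// atomically claim the next index below the high-water mark _hwm. Returns NULL
// if the backing memory is exhausted.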
204 204 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
205 205 // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
206 206 // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
207 207 // wraparound of _hwm.
208 208 if (_hwm >= _chunk_capacity) {
209 209 return NULL;
210 210 }
211 211
212 212 size_t cur_idx = Atomic::add(&_hwm, 1u) - 1;
213 213 if (cur_idx >= _chunk_capacity) {
214 214 return NULL;
215 215 }
216 216
217 217 TaskQueueEntryChunk* result = ::new (&_base[cur_idx]) TaskQueueEntryChunk;
218 218 result->next = NULL;
219 219 return result;
220 220 }
221 221
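// Push a full buffer of EntriesPerChunk entries onto the global mark stack:
// copy ptr_arr into a chunk taken from the free list (or newly allocated from
// backing memory) and publish it on the chunk list. Returns false on overflow,
// i.e. when no chunk could be obtained.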
222 222 bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) {
223 223 // Get a new chunk.
224 224 TaskQueueEntryChunk* new_chunk = remove_chunk_from_free_list();
225 225
226 226 if (new_chunk == NULL) {
227 227 // Did not get a chunk from the free list. Allocate from backing memory.
228 228 new_chunk = allocate_new_chunk();
229 229
230 230 if (new_chunk == NULL) {
231 231 return false;
232 232 }
233 233 }
234 234
235 235 Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, EntriesPerChunk * sizeof(G1TaskQueueEntry));
236 236
237 237 add_chunk_to_chunk_list(new_chunk);
238 238
239 239 return true;
240 240 }
241 241
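// Pop the most recently pushed chunk from the global mark stack into ptr_arr
// and recycle the chunk onto the free list. Returns false if the mark stack
// is empty.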
242 242 bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
243 243 TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list();
244 244
245 245 if (cur == NULL) {
246 246 return false;
247 247 }
248 248
249 249 Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry));
250 250
251 251 add_chunk_to_free_list(cur);
252 252 return true;
253 253 }
254 254
255 255 void G1CMMarkStack::set_empty() {
256 256 _chunks_in_chunk_list = 0;
257 257 _hwm = 0;
258 258 _chunk_list = NULL;
259 259 _free_list = NULL;
260 260 }
261 261
262 262 G1CMRootMemRegions::G1CMRootMemRegions(uint const max_regions) :
263 263 _root_regions(NULL),
264 264 _max_regions(max_regions),
265 265 _num_root_regions(0),
266 266 _claimed_root_regions(0),
267 267 _scan_in_progress(false),
268 268 _should_abort(false) {
269 269 _root_regions = new MemRegion[_max_regions];
270 270 if (_root_regions == NULL) {
271 271 vm_exit_during_initialization("Could not allocate root MemRegion set.");
272 272 }
273 273 }
274 274
275 275 G1CMRootMemRegions::~G1CMRootMemRegions() {
276 276 delete[] _root_regions;
277 277 }
278 278
279 279 void G1CMRootMemRegions::reset() {
280 280 _num_root_regions = 0;
281 281 }
282 282
283 283 void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) {
284 284 assert_at_safepoint();
285 285 size_t idx = Atomic::add(&_num_root_regions, (size_t)1) - 1;
286 286 assert(idx < _max_regions, "Trying to add more root MemRegions than there is space for, maximum is " SIZE_FORMAT, _max_regions);
287 287 assert(start != NULL && end != NULL && start <= end, "Start (" PTR_FORMAT ") should be less than or equal to "
288 288 "end (" PTR_FORMAT ")", p2i(start), p2i(end));
289 289 _root_regions[idx].set_start(start);
290 290 _root_regions[idx].set_end(end);
291 291 }
292 292
293 293 void G1CMRootMemRegions::prepare_for_scan() {
294 294 assert(!scan_in_progress(), "pre-condition");
295 295
296 296 _scan_in_progress = _num_root_regions > 0;
297 297
298 298 _claimed_root_regions = 0;
299 299 _should_abort = false;
300 300 }
301 301
302 302 const MemRegion* G1CMRootMemRegions::claim_next() {
303 303 if (_should_abort) {
304 304 // If someone has set the should_abort flag, we return NULL to
305 305 // force the caller to bail out of their loop.
306 306 return NULL;
307 307 }
308 308
309 309 if (_claimed_root_regions >= _num_root_regions) {
310 310 return NULL;
311 311 }
312 312
313 313 size_t claimed_index = Atomic::add(&_claimed_root_regions, (size_t)1) - 1;
314 314 if (claimed_index < _num_root_regions) {
315 315 return &_root_regions[claimed_index];
316 316 }
317 317 return NULL;
318 318 }
319 319
320 320 uint G1CMRootMemRegions::num_root_regions() const {
321 321 return (uint)_num_root_regions;
322 322 }
323 323
324 324 void G1CMRootMemRegions::notify_scan_done() {
325 325 MutexLocker x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
326 326 _scan_in_progress = false;
327 327 RootRegionScan_lock->notify_all();
328 328 }
329 329
330 330 void G1CMRootMemRegions::cancel_scan() {
331 331 notify_scan_done();
332 332 }
333 333
334 334 void G1CMRootMemRegions::scan_finished() {
335 335 assert(scan_in_progress(), "pre-condition");
336 336
337 337 if (!_should_abort) {
338 338 assert(_claimed_root_regions >= num_root_regions(),
339 339 "we should have claimed all root regions, claimed " SIZE_FORMAT ", length = %u",
340 340 _claimed_root_regions, num_root_regions());
341 341 }
342 342
343 343 notify_scan_done();
344 344 }
345 345
346 346 bool G1CMRootMemRegions::wait_until_scan_finished() {
347 347 if (!scan_in_progress()) {
348 348 return false;
349 349 }
350 350
351 351 {
352 352 MonitorLocker ml(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
353 353 while (scan_in_progress()) {
354 354 ml.wait();
355 355 }
356 356 }
357 357 return true;
358 358 }
359 359
360 360 // Returns the maximum number of workers to be used in a concurrent
361 361 // phase based on the number of GC workers being used in a STW
362 362 // phase.
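// E.g. 8 STW GC workers scale to (8 + 2) / 4 = 2 concurrent workers; the
// MAX2 guarantees at least one worker even for very small values.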
363 363 static uint scale_concurrent_worker_threads(uint num_gc_workers) {
364 364 return MAX2((num_gc_workers + 2) / 4, 1U);
365 365 }
366 366
367 367 G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
368 368 G1RegionToSpaceMapper* prev_bitmap_storage,
369 369 G1RegionToSpaceMapper* next_bitmap_storage) :
370 370 // _cm_thread set inside the constructor
371 371 _g1h(g1h),
372 372 _completed_initialization(false),
373 373
374 374 _mark_bitmap_1(),
375 375 _mark_bitmap_2(),
376 376 _prev_mark_bitmap(&_mark_bitmap_1),
377 377 _next_mark_bitmap(&_mark_bitmap_2),
378 378
379 379 _heap(_g1h->reserved_region()),
380 380
381 381 _root_regions(_g1h->max_regions()),
382 382
383 383 _global_mark_stack(),
384 384
385 385 // _finger set in set_non_marking_state
386 386
387 387 _worker_id_offset(G1DirtyCardQueueSet::num_par_ids() + G1ConcRefinementThreads),
388 388 _max_num_tasks(ParallelGCThreads),
389 389 // _num_active_tasks set in set_non_marking_state()
390 390 // _tasks set inside the constructor
391 391
392 392 _task_queues(new G1CMTaskQueueSet((int) _max_num_tasks)),
393 393 _terminator((int) _max_num_tasks, _task_queues),
394 394
395 395 _first_overflow_barrier_sync(),
396 396 _second_overflow_barrier_sync(),
397 397
398 398 _has_overflown(false),
399 399 _concurrent(false),
400 400 _has_aborted(false),
401 401 _restart_for_overflow(false),
402 402 _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
403 403 _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),
404 404
405 405 // _verbose_level set below
406 406
407 407 _init_times(),
408 408 _remark_times(),
409 409 _remark_mark_times(),
410 410 _remark_weak_ref_times(),
411 411 _cleanup_times(),
412 412 _total_cleanup_time(0.0),
413 413
414 414 _accum_task_vtime(NULL),
415 415
416 416 _concurrent_workers(NULL),
417 417 _num_concurrent_workers(0),
418 418 _max_concurrent_workers(0),
419 419
420 420 _region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_regions(), mtGC)),
421 421 _top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_regions(), mtGC))
422 422 {
423 423 _mark_bitmap_1.initialize(g1h->reserved_region(), prev_bitmap_storage);
424 424 _mark_bitmap_2.initialize(g1h->reserved_region(), next_bitmap_storage);
425 425
426 426 // Create & start ConcurrentMark thread.
427 427 _cm_thread = new G1ConcurrentMarkThread(this);
428 428 if (_cm_thread->osthread() == NULL) {
429 429 vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
430 430 }
431 431
432 432 assert(CGC_lock != NULL, "CGC_lock must be initialized");
433 433
434 434 if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
435 435 // Calculate the number of concurrent worker threads by scaling
436 436 // the number of parallel GC threads.
437 437 uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads);
438 438 FLAG_SET_ERGO(ConcGCThreads, marking_thread_num);
439 439 }
440 440
441 441 assert(ConcGCThreads > 0, "ConcGCThreads must have been set.");
442 442 if (ConcGCThreads > ParallelGCThreads) {
443 443 log_warning(gc)("More ConcGCThreads (%u) than ParallelGCThreads (%u).",
444 444 ConcGCThreads, ParallelGCThreads);
445 445 return;
446 446 }
447 447
448 448 log_debug(gc)("ConcGCThreads: %u offset %u", ConcGCThreads, _worker_id_offset);
449 449 log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);
450 450
451 451 _num_concurrent_workers = ConcGCThreads;
452 452 _max_concurrent_workers = _num_concurrent_workers;
453 453
454 454 _concurrent_workers = new WorkGang("G1 Conc", _max_concurrent_workers, false, true);
455 455 _concurrent_workers->initialize_workers();
456 456
457 457 if (FLAG_IS_DEFAULT(MarkStackSize)) {
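// Size the mark stack so that each concurrent worker can fit at least one
// full local task queue's worth of entries, bounded above by MarkStackSizeMax.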
458 458 size_t mark_stack_size =
459 459 MIN2(MarkStackSizeMax,
460 460 MAX2(MarkStackSize, (size_t) (_max_concurrent_workers * TASKQUEUE_SIZE)));
461 461 // Verify that the calculated value for MarkStackSize is in range.
462 462 // It would be nice to use the private utility routine from Arguments.
463 463 if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
464 464 log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
465 465 "must be between 1 and " SIZE_FORMAT,
466 466 mark_stack_size, MarkStackSizeMax);
467 467 return;
468 468 }
469 469 FLAG_SET_ERGO(MarkStackSize, mark_stack_size);
470 470 } else {
471 471 // Verify MarkStackSize is in range.
472 472 if (FLAG_IS_CMDLINE(MarkStackSize)) {
473 473 if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
474 474 if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
475 475 log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
476 476 "must be between 1 and " SIZE_FORMAT,
477 477 MarkStackSize, MarkStackSizeMax);
478 478 return;
479 479 }
480 480 } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
481 481 if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
482 482 log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
483 483 " or for MarkStackSizeMax (" SIZE_FORMAT ")",
484 484 MarkStackSize, MarkStackSizeMax);
485 485 return;
486 486 }
487 487 }
488 488 }
489 489 }
490 490
491 491 if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) {
492 492 vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
493 493 }
494 494
495 495 _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_num_tasks, mtGC);
496 496 _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_num_tasks, mtGC);
497 497
498 498 // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
499 499 _num_active_tasks = _max_num_tasks;
500 500
501 501 for (uint i = 0; i < _max_num_tasks; ++i) {
502 502 G1CMTaskQueue* task_queue = new G1CMTaskQueue();
503 503 task_queue->initialize();
504 504 _task_queues->register_queue(i, task_queue);
505 505
506 506 _tasks[i] = new G1CMTask(i, this, task_queue, _region_mark_stats, _g1h->max_regions());
507 507
508 508 _accum_task_vtime[i] = 0.0;
509 509 }
510 510
511 511 reset_at_marking_complete();
512 512 _completed_initialization = true;
513 513 }
514 514
515 515 void G1ConcurrentMark::reset() {
516 516 _has_aborted = false;
517 517
518 518 reset_marking_for_restart();
519 519
520 520 // Reset all tasks, since different phases will use different number of active
521 521 // threads. So, it's easiest to have all of them ready.
522 522 for (uint i = 0; i < _max_num_tasks; ++i) {
523 523 _tasks[i]->reset(_next_mark_bitmap);
524 524 }
525 525
526 526 uint max_regions = _g1h->max_regions();
527 527 for (uint i = 0; i < max_regions; i++) {
528 528 _top_at_rebuild_starts[i] = NULL;
529 529 _region_mark_stats[i].clear();
530 530 }
531 531 }
532 532
533 533 void G1ConcurrentMark::clear_statistics_in_region(uint region_idx) {
534 534 for (uint j = 0; j < _max_num_tasks; ++j) {
535 535 _tasks[j]->clear_mark_stats_cache(region_idx);
536 536 }
537 537 _top_at_rebuild_starts[region_idx] = NULL;
538 538 _region_mark_stats[region_idx].clear();
539 539 }
540 540
541 541 void G1ConcurrentMark::clear_statistics(HeapRegion* r) {
542 542 uint const region_idx = r->hrm_index();
543 543 if (r->is_humongous()) {
544 544 assert(r->is_starts_humongous(), "Got continues humongous region here");
545 545 uint const size_in_regions = (uint)_g1h->humongous_obj_size_in_regions(oop(r->humongous_start_region()->bottom())->size());
546 546 for (uint j = region_idx; j < (region_idx + size_in_regions); j++) {
547 547 clear_statistics_in_region(j);
548 548 }
549 549 } else {
550 550 clear_statistics_in_region(region_idx);
551 551 }
552 552 }
553 553
554 554 static void clear_mark_if_set(G1CMBitMap* bitmap, HeapWord* addr) {
555 555 if (bitmap->is_marked(addr)) {
556 556 bitmap->clear(addr);
557 557 }
558 558 }
559 559
560 560 void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
561 561 assert_at_safepoint_on_vm_thread();
562 562
563 563 // Need to clear all mark bits of the humongous object.
564 564 clear_mark_if_set(_prev_mark_bitmap, r->bottom());
565 565 clear_mark_if_set(_next_mark_bitmap, r->bottom());
566 566
567 567 if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
568 568 return;
569 569 }
570 570
571 571 // Clear any statistics about the region gathered so far.
572 572 clear_statistics(r);
573 573 }
574 574
575 575 void G1ConcurrentMark::reset_marking_for_restart() {
576 576 _global_mark_stack.set_empty();
577 577
578 578 // Expand the marking stack, if we have to and if we can.
579 579 if (has_overflown()) {
580 580 _global_mark_stack.expand();
581 581
582 582 uint max_regions = _g1h->max_regions();
583 583 for (uint i = 0; i < max_regions; i++) {
584 584 _region_mark_stats[i].clear_during_overflow();
585 585 }
586 586 }
587 587
588 588 clear_has_overflown();
589 589 _finger = _heap.start();
590 590
591 591 for (uint i = 0; i < _max_num_tasks; ++i) {
592 592 G1CMTaskQueue* queue = _task_queues->queue(i);
593 593 queue->set_empty();
594 594 }
595 595 }
596 596
597 597 void G1ConcurrentMark::set_concurrency(uint active_tasks) {
598 598 assert(active_tasks <= _max_num_tasks, "we should not have more");
599 599
600 600 _num_active_tasks = active_tasks;
601 601 // Need to update the three data structures below according to the
602 602 // number of active threads for this phase.
603 603 _terminator.terminator()->reset_for_reuse((int) active_tasks);
604 604 _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
605 605 _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
606 606 }
607 607
608 608 void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
609 609 set_concurrency(active_tasks);
610 610
611 611 _concurrent = concurrent;
612 612
613 613 if (!concurrent) {
614 614 // At this point we should be in a STW phase, and completed marking.
615 615 assert_at_safepoint_on_vm_thread();
616 616 assert(out_of_regions(),
617 617 "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
618 618 p2i(_finger), p2i(_heap.end()));
619 619 }
620 620 }
621 621
622 622 void G1ConcurrentMark::reset_at_marking_complete() {
623 623 // We set the global marking state to some default values when we're
624 624 // not doing marking.
625 625 reset_marking_for_restart();
626 626 _num_active_tasks = 0;
627 627 }
628 628
629 629 G1ConcurrentMark::~G1ConcurrentMark() {
630 630 FREE_C_HEAP_ARRAY(HeapWord*, _top_at_rebuild_starts);
631 631 FREE_C_HEAP_ARRAY(G1RegionMarkStats, _region_mark_stats);
632 632 // The G1ConcurrentMark instance is never freed.
633 633 ShouldNotReachHere();
634 634 }
635 635
636 636 class G1ClearBitMapTask : public AbstractGangTask {
637 637 public:
638 638 static size_t chunk_size() { return M; }
639 639
640 640 private:
641 641 // Heap region closure used for clearing the given mark bitmap.
642 642 class G1ClearBitmapHRClosure : public HeapRegionClosure {
643 643 private:
644 644 G1CMBitMap* _bitmap;
645 645 G1ConcurrentMark* _cm;
646 646 public:
647 647 G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _bitmap(bitmap), _cm(cm) {
648 648 }
649 649
650 650 virtual bool do_heap_region(HeapRegion* r) {
651 651 size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;
652 652
653 653 HeapWord* cur = r->bottom();
654 654 HeapWord* const end = r->end();
655 655
656 656 while (cur < end) {
657 657 MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
658 658 _bitmap->clear_range(mr);
659 659
660 660 cur += chunk_size_in_words;
661 661
662 662 // Abort iteration if after yielding the marking has been aborted.
663 663 if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
664 664 return true;
665 665 }
666 666 // Repeat the asserts from before the start of the closure. We will do them
667 667 // as asserts here to minimize their overhead in product builds. However, we
668 668 // will have them as guarantees at the beginning / end of the bitmap
669 669 // clearing to get some checking in product builds.
670 670 assert(_cm == NULL || _cm->cm_thread()->during_cycle(), "invariant");
671 671 assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_or_rebuild_in_progress(), "invariant");
672 672 }
673 673 assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());
674 674
675 675 return false;
676 676 }
677 677 };
678 678
679 679 G1ClearBitmapHRClosure _cl;
680 680 HeapRegionClaimer _hr_claimer;
681 681 bool _suspendible; // If the task is suspendible, workers must join the STS.
682 682
683 683 public:
684 684 G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
685 685 AbstractGangTask("G1 Clear Bitmap"),
686 686 _cl(bitmap, suspendible ? cm : NULL),
687 687 _hr_claimer(n_workers),
688 688 _suspendible(suspendible)
689 689 { }
690 690
691 691 void work(uint worker_id) {
692 692 SuspendibleThreadSetJoiner sts_join(_suspendible);
693 693 G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&_cl, &_hr_claimer, worker_id);
694 694 }
695 695
696 696 bool is_complete() {
697 697 return _cl.is_complete();
698 698 }
699 699 };
700 700
701 701 void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
702 702 assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");
703 703
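// Split the bitmap into chunks of G1ClearBitMapTask::chunk_size() bytes and
// use at most one worker per chunk.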
704 704 size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
705 705 size_t const num_chunks = align_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();
706 706
707 707 uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
708 708
709 709 G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);
710 710
711 711 log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
712 712 workers->run_task(&cl, num_workers);
713 713 guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
714 714 }
715 715
716 716 void G1ConcurrentMark::cleanup_for_next_mark() {
717 717 // Make sure that the concurrent mark thread looks to still be in
718 718 // the current cycle.
719 719 guarantee(cm_thread()->during_cycle(), "invariant");
720 720
721 721 // We are finishing up the current cycle by clearing the next
722 722 // marking bitmap and getting it ready for the next cycle. During
723 723 // this time no other cycle can start. So, let's make sure that this
724 724 // is the case.
725 725 guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
726 726
727 727 clear_bitmap(_next_mark_bitmap, _concurrent_workers, true);
728 728
729 729 // Repeat the asserts from above.
730 730 guarantee(cm_thread()->during_cycle(), "invariant");
731 731 guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
732 732 }
733 733
734 734 void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
735 735 assert_at_safepoint_on_vm_thread();
736 736 clear_bitmap(_prev_mark_bitmap, workers, false);
737 737 }
738 738
739 739 class NoteStartOfMarkHRClosure : public HeapRegionClosure {
740 740 public:
741 741 bool do_heap_region(HeapRegion* r) {
742 742 r->note_start_of_marking();
743 743 return false;
744 744 }
745 745 };
746 746
747 747 void G1ConcurrentMark::pre_initial_mark() {
748 748 assert_at_safepoint_on_vm_thread();
749 749
750 750 // Reset marking state.
751 751 reset();
752 752
753 753 // For each region note start of marking.
754 754 NoteStartOfMarkHRClosure startcl;
755 755 _g1h->heap_region_iterate(&startcl);
756 756
757 757 _root_regions.reset();
758 758 }
759 759
760 760
761 761 void G1ConcurrentMark::post_initial_mark() {
762 762 // Start Concurrent Marking weak-reference discovery.
763 763 ReferenceProcessor* rp = _g1h->ref_processor_cm();
764 764 // enable ("weak") refs discovery
765 765 rp->enable_discovery();
766 766 rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
767 767
768 768 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
769 769 // This is the start of the marking cycle; we expect all
770 770 // threads to have SATB queues with active set to false.
771 771 satb_mq_set.set_active_all_threads(true, /* new active value */
772 772 false /* expected_active */);
773 773
774 774 _root_regions.prepare_for_scan();
775 775
776 776 // update_g1_committed() will be called at the end of an evac pause
777 777 // when marking is on. So, it's also called at the end of the
778 778 // initial-mark pause to update the heap end, if the heap expands
779 779 // during it. No need to call it here.
780 780 }
781 781
782 782 /*
783 783 * Notice that in the next two methods, we actually leave the STS
784 784 * during the barrier sync and join it immediately afterwards. If we
785 785 * do not do this, the following deadlock can occur: one thread could
786 786 * be in the barrier sync code, waiting for the other thread to also
787 787 * sync up, whereas another one could be trying to yield, while also
788 788 * waiting for the other threads to sync up too.
789 789 *
790 790 * Note, however, that this code is also used during remark and in
791 791 * this case we should not attempt to leave / enter the STS, otherwise
792 792 * we'll either hit an assert (debug / fastdebug) or deadlock
793 793 * (product). So we should only leave / enter the STS if we are
794 794 * operating concurrently.
795 795 *
796 796 * Because the thread that does the sync barrier has left the STS, it
797 797 * is possible for it to be suspended while a Full GC or an evacuation
798 798 * pause occurs. This is actually safe, since entering the sync
799 799 * barrier is one of the last things do_marking_step() does, and it
800 800 * doesn't manipulate any data structures afterwards.
801 801 */
802 802
803 803 void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
804 804 bool barrier_aborted;
805 805 {
806 806 SuspendibleThreadSetLeaver sts_leave(concurrent());
807 807 barrier_aborted = !_first_overflow_barrier_sync.enter();
808 808 }
809 809
810 810 // at this point everyone should have synced up and not be doing any
811 811 // more work
812 812
813 813 if (barrier_aborted) {
814 814 // If the barrier aborted we ignore the overflow condition and
815 815 // just abort the whole marking phase as quickly as possible.
816 816 return;
817 817 }
818 818 }
819 819
820 820 void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
821 821 SuspendibleThreadSetLeaver sts_leave(concurrent());
822 822 _second_overflow_barrier_sync.enter();
823 823
824 824 // at this point everything should be re-initialized and ready to go
825 825 }
826 826
827 827 class G1CMConcurrentMarkingTask : public AbstractGangTask {
828 828 G1ConcurrentMark* _cm;
829 829
830 830 public:
831 831 void work(uint worker_id) {
832 832 assert(Thread::current()->is_ConcurrentGC_thread(), "Not a concurrent GC thread");
833 833 ResourceMark rm;
834 834
835 835 double start_vtime = os::elapsedVTime();
836 836
837 837 {
838 838 SuspendibleThreadSetJoiner sts_join;
839 839
840 840 assert(worker_id < _cm->active_tasks(), "invariant");
841 841
842 842 G1CMTask* task = _cm->task(worker_id);
843 843 task->record_start_time();
844 844 if (!_cm->has_aborted()) {
845 845 do {
846 846 task->do_marking_step(G1ConcMarkStepDurationMillis,
847 847 true /* do_termination */,
848 848 false /* is_serial*/);
849 849
850 850 _cm->do_yield_check();
851 851 } while (!_cm->has_aborted() && task->has_aborted());
852 852 }
853 853 task->record_end_time();
854 854 guarantee(!task->has_aborted() || _cm->has_aborted(), "invariant");
855 855 }
856 856
857 857 double end_vtime = os::elapsedVTime();
858 858 _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
859 859 }
860 860
861 861 G1CMConcurrentMarkingTask(G1ConcurrentMark* cm) :
862 862 AbstractGangTask("Concurrent Mark"), _cm(cm) { }
863 863
864 864 ~G1CMConcurrentMarkingTask() { }
865 865 };
866 866
867 867 uint G1ConcurrentMark::calc_active_marking_workers() {
868 868 uint result = 0;
869 869 if (!UseDynamicNumberOfGCThreads ||
870 870 (!FLAG_IS_DEFAULT(ConcGCThreads) &&
871 871 !ForceDynamicNumberOfGCThreads)) {
872 872 result = _max_concurrent_workers;
873 873 } else {
874 874 result =
875 875 WorkerPolicy::calc_default_active_workers(_max_concurrent_workers,
876 876 1, /* Minimum workers */
877 877 _num_concurrent_workers,
878 878 Threads::number_of_non_daemon_threads());
879 879 // Don't scale the result down by scale_concurrent_workers() because
880 880 // that scaling has already gone into "_max_concurrent_workers".
881 881 }
882 882 assert(result > 0 && result <= _max_concurrent_workers,
883 883 "Calculated number of marking workers must be larger than zero and at most the maximum %u, but is %u",
884 884 _max_concurrent_workers, result);
885 885 return result;
886 886 }
887 887
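// Scan the objects in the given root MemRegion, applying
// G1RootRegionScanClosure to every reference they contain.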
888 888 void G1ConcurrentMark::scan_root_region(const MemRegion* region, uint worker_id) {
889 889 #ifdef ASSERT
890 890 HeapWord* last = region->last();
891 891 HeapRegion* hr = _g1h->heap_region_containing(last);
892 892 assert(hr->is_old() || hr->next_top_at_mark_start() == hr->bottom(),
893 893 "Root regions must be old or survivor/eden but region %u is %s", hr->hrm_index(), hr->get_type_str());
894 894 assert(hr->next_top_at_mark_start() == region->start(),
895 895 "MemRegion start should be equal to nTAMS");
896 896 #endif
897 897
898 898 G1RootRegionScanClosure cl(_g1h, this, worker_id);
899 899
900 900 const uintx interval = PrefetchScanIntervalInBytes;
901 901 HeapWord* curr = region->start();
902 902 const HeapWord* end = region->end();
903 903 while (curr < end) {
904 904 Prefetch::read(curr, interval);
905 905 oop obj = oop(curr);
906 906 int size = obj->oop_iterate_size(&cl);
907 907 assert(size == obj->size(), "sanity");
908 908 curr += size;
909 909 }
910 910 }
911 911
912 912 class G1CMRootRegionScanTask : public AbstractGangTask {
913 913 G1ConcurrentMark* _cm;
914 914 public:
915 915 G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
916 916 AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }
917 917
918 918 void work(uint worker_id) {
919 919 assert(Thread::current()->is_ConcurrentGC_thread(),
920 920 "this should only be done by a conc GC thread");
921 921
922 922 G1CMRootMemRegions* root_regions = _cm->root_regions();
923 923 const MemRegion* region = root_regions->claim_next();
924 924 while (region != NULL) {
925 925 _cm->scan_root_region(region, worker_id);
926 926 region = root_regions->claim_next();
927 927 }
928 928 }
929 929 };
930 930
931 931 void G1ConcurrentMark::scan_root_regions() {
932 932 // scan_in_progress() will have been set to true only if there was
933 933 // at least one root region to scan. So, if it's false, we
934 934 // should not attempt to do any further work.
935 935 if (root_regions()->scan_in_progress()) {
936 936 assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");
937 937
938 938 _num_concurrent_workers = MIN2(calc_active_marking_workers(),
939 939 // We distribute work on a per-region basis, so starting
940 940 // more threads than that is useless.
941 941 root_regions()->num_root_regions());
942 942 assert(_num_concurrent_workers <= _max_concurrent_workers,
943 943 "Maximum number of marking threads exceeded");
944 944
945 945 G1CMRootRegionScanTask task(this);
946 946 log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
947 947 task.name(), _num_concurrent_workers, root_regions()->num_root_regions());
948 948 _concurrent_workers->run_task(&task, _num_concurrent_workers);
949 949
950 950 // It's possible that has_aborted() is true here without actually
951 951 // aborting the survivor scan earlier. This is OK as it's
952 952 // mainly used for sanity checking.
953 953 root_regions()->scan_finished();
954 954 }
955 955 }
956 956
957 957 void G1ConcurrentMark::concurrent_cycle_start() {
958 958 _gc_timer_cm->register_gc_start();
959 959
960 960 _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());
961 961
962 962 _g1h->trace_heap_before_gc(_gc_tracer_cm);
963 963 }
964 964
965 965 void G1ConcurrentMark::concurrent_cycle_end() {
966 966 _g1h->collector_state()->set_clearing_next_bitmap(false);
967 967
968 968 _g1h->trace_heap_after_gc(_gc_tracer_cm);
969 969
970 970 if (has_aborted()) {
971 971 log_info(gc, marking)("Concurrent Mark Abort");
972 972 _gc_tracer_cm->report_concurrent_mode_failure();
973 973 }
974 974
975 975 _gc_timer_cm->register_gc_end();
976 976
977 977 _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
978 978 }
979 979
980 980 void G1ConcurrentMark::mark_from_roots() {
981 981 _restart_for_overflow = false;
982 982
983 983 _num_concurrent_workers = calc_active_marking_workers();
984 984
985 985 uint active_workers = MAX2(1U, _num_concurrent_workers);
986 986
987 987 // Setting active workers is not guaranteed since fewer
988 988 // worker threads may currently exist and more may not be
989 989 // available.
990 990 active_workers = _concurrent_workers->update_active_workers(active_workers);
991 991 log_info(gc, task)("Using %u workers of %u for marking", active_workers, _concurrent_workers->total_workers());
992 992
993 993 // Parallel task terminator is set in "set_concurrency_and_phase()"
994 994 set_concurrency_and_phase(active_workers, true /* concurrent */);
995 995
996 996 G1CMConcurrentMarkingTask marking_task(this);
997 997 _concurrent_workers->run_task(&marking_task);
998 998 print_stats();
999 999 }
1000 1000
1001 1001 void G1ConcurrentMark::verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller) {
1002 1002 G1HeapVerifier* verifier = _g1h->verifier();
1003 1003
1004 1004 verifier->verify_region_sets_optional();
1005 1005
1006 1006 if (VerifyDuringGC) {
1007 1007 GCTraceTime(Debug, gc, phases) debug(caller, _gc_timer_cm);
1008 1008
1009 1009 size_t const BufLen = 512;
1010 1010 char buffer[BufLen];
1011 1011
1012 1012 jio_snprintf(buffer, BufLen, "During GC (%s)", caller);
1013 1013 verifier->verify(type, vo, buffer);
1014 1014 }
1015 1015
1016 1016 verifier->check_bitmaps(caller);
1017 1017 }
1018 1018
1019 1019 class G1UpdateRemSetTrackingBeforeRebuildTask : public AbstractGangTask {
1020 1020 G1CollectedHeap* _g1h;
1021 1021 G1ConcurrentMark* _cm;
1022 1022 HeapRegionClaimer _hrclaimer;
1023 1023 uint volatile _total_selected_for_rebuild;
1024 1024
1025 1025 G1PrintRegionLivenessInfoClosure _cl;
1026 1026
1027 1027 class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
1028 1028 G1CollectedHeap* _g1h;
1029 1029 G1ConcurrentMark* _cm;
1030 1030
1031 1031 G1PrintRegionLivenessInfoClosure* _cl;
1032 1032
1033 1033 uint _num_regions_selected_for_rebuild; // The number of regions actually selected for rebuild.
1034 1034
1035 1035 void update_remset_before_rebuild(HeapRegion* hr) {
1036 1036 G1RemSetTrackingPolicy* tracking_policy = _g1h->policy()->remset_tracker();
1037 1037
1038 1038 bool selected_for_rebuild;
1039 1039 if (hr->is_humongous()) {
1040 1040 bool const is_live = _cm->liveness(hr->humongous_start_region()->hrm_index()) > 0;
1041 1041 selected_for_rebuild = tracking_policy->update_humongous_before_rebuild(hr, is_live);
1042 1042 } else {
1043 1043 size_t const live_bytes = _cm->liveness(hr->hrm_index());
1044 1044 selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
1045 1045 }
1046 1046 if (selected_for_rebuild) {
1047 1047 _num_regions_selected_for_rebuild++;
1048 1048 }
1049 1049 _cm->update_top_at_rebuild_start(hr);
1050 1050 }
1051 1051
1052 1052 // Distribute the given words across the humongous object starting with hr and
1053 1053 // note end of marking.
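// Each region covered by the object receives at most one region's worth
// (GrainWords) of the marked words; e.g. an object spanning 2.5 regions
// contributes GrainWords, GrainWords and GrainWords / 2 words, in that order.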
1054 1054 void distribute_marked_bytes(HeapRegion* hr, size_t marked_words) {
1055 1055 uint const region_idx = hr->hrm_index();
1056 1056 size_t const obj_size_in_words = (size_t)oop(hr->bottom())->size();
1057 1057 uint const num_regions_in_humongous = (uint)G1CollectedHeap::humongous_obj_size_in_regions(obj_size_in_words);
1058 1058
1059 1059 // "Distributing" zero words means that we only note end of marking for these
1060 1060 // regions.
1061 1061 assert(marked_words == 0 || obj_size_in_words == marked_words,
1062 1062 "Marked words should either be 0 or the same as humongous object (" SIZE_FORMAT ") but is " SIZE_FORMAT,
1063 1063 obj_size_in_words, marked_words);
1064 1064
1065 1065 for (uint i = region_idx; i < (region_idx + num_regions_in_humongous); i++) {
1066 1066 HeapRegion* const r = _g1h->region_at(i);
1067 1067 size_t const words_to_add = MIN2(HeapRegion::GrainWords, marked_words);
1068 1068
1069 1069 log_trace(gc, marking)("Adding " SIZE_FORMAT " words to humongous region %u (%s)",
1070 1070 words_to_add, i, r->get_type_str());
1071 1071 add_marked_bytes_and_note_end(r, words_to_add * HeapWordSize);
1072 1072 marked_words -= words_to_add;
1073 1073 }
1074 1074 assert(marked_words == 0,
1075 1075 SIZE_FORMAT " words left after distributing space across %u regions",
1076 1076 marked_words, num_regions_in_humongous);
1077 1077 }
1078 1078
1079 1079 void update_marked_bytes(HeapRegion* hr) {
1080 1080 uint const region_idx = hr->hrm_index();
1081 1081 size_t const marked_words = _cm->liveness(region_idx);
1082 1082 // The marking attributes the object's size completely to the humongous starts
1083 1083 // region. We need to distribute this value across the entire set of regions a
1084 1084 // humongous object spans.
1085 1085 if (hr->is_humongous()) {
1086 1086 assert(hr->is_starts_humongous() || marked_words == 0,
1087 1087 "Should not have marked words " SIZE_FORMAT " in non-starts humongous region %u (%s)",
1088 1088 marked_words, region_idx, hr->get_type_str());
1089 1089 if (hr->is_starts_humongous()) {
1090 1090 distribute_marked_bytes(hr, marked_words);
1091 1091 }
1092 1092 } else {
1093 1093 log_trace(gc, marking)("Adding " SIZE_FORMAT " words to region %u (%s)", marked_words, region_idx, hr->get_type_str());
1094 1094 add_marked_bytes_and_note_end(hr, marked_words * HeapWordSize);
1095 1095 }
1096 1096 }
1097 1097
1098 1098 void add_marked_bytes_and_note_end(HeapRegion* hr, size_t marked_bytes) {
1099 1099 hr->add_to_marked_bytes(marked_bytes);
1100 1100 _cl->do_heap_region(hr);
1101 1101 hr->note_end_of_marking();
1102 1102 }
1103 1103
1104 1104 public:
1105 1105 G1UpdateRemSetTrackingBeforeRebuild(G1CollectedHeap* g1h, G1ConcurrentMark* cm, G1PrintRegionLivenessInfoClosure* cl) :
1106 1106 _g1h(g1h), _cm(cm), _cl(cl), _num_regions_selected_for_rebuild(0) { }
1107 1107
1108 1108 virtual bool do_heap_region(HeapRegion* r) {
1109 1109 update_remset_before_rebuild(r);
1110 1110 update_marked_bytes(r);
1111 1111
1112 1112 return false;
1113 1113 }
1114 1114
1115 1115 uint num_selected_for_rebuild() const { return _num_regions_selected_for_rebuild; }
1116 1116 };
1117 1117
1118 1118 public:
1119 1119 G1UpdateRemSetTrackingBeforeRebuildTask(G1CollectedHeap* g1h, G1ConcurrentMark* cm, uint num_workers) :
1120 1120 AbstractGangTask("G1 Update RemSet Tracking Before Rebuild"),
1121 1121 _g1h(g1h), _cm(cm), _hrclaimer(num_workers), _total_selected_for_rebuild(0), _cl("Post-Marking") { }
1122 1122
1123 1123 virtual void work(uint worker_id) {
1124 1124 G1UpdateRemSetTrackingBeforeRebuild update_cl(_g1h, _cm, &_cl);
1125 1125 _g1h->heap_region_par_iterate_from_worker_offset(&update_cl, &_hrclaimer, worker_id);
1126 1126 Atomic::add(&_total_selected_for_rebuild, update_cl.num_selected_for_rebuild());
1127 1127 }
1128 1128
1129 1129 uint total_selected_for_rebuild() const { return _total_selected_for_rebuild; }
1130 1130
1131 1131 // Number of regions for which roughly one thread should be spawned for this work.
1132 1132 static const uint RegionsPerThread = 384;
1133 1133 };
1134 1134
1135 1135 class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure {
1136 1136 G1CollectedHeap* _g1h;
1137 1137 public:
1138 1138 G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { }
1139 1139
1140 1140 virtual bool do_heap_region(HeapRegion* r) {
1141 1141 _g1h->policy()->remset_tracker()->update_after_rebuild(r);
1142 1142 return false;
1143 1143 }
1144 1144 };
1145 1145
1146 1146 void G1ConcurrentMark::remark() {
1147 1147 assert_at_safepoint_on_vm_thread();
1148 1148
1149 1149 // If a full collection has happened, we should not continue. However we might
1150 1150 // have ended up here as the Remark VM operation has been scheduled already.
1151 1151 if (has_aborted()) {
1152 1152 return;
1153 1153 }
1154 1154
1155 1155 G1Policy* policy = _g1h->policy();
1156 1156 policy->record_concurrent_mark_remark_start();
1157 1157
1158 1158 double start = os::elapsedTime();
1159 1159
1160 1160 verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark before");
1161 1161
1162 1162 {
1163 1163 GCTraceTime(Debug, gc, phases) debug("Finalize Marking", _gc_timer_cm);
1164 1164 finalize_marking();
1165 1165 }
1166 1166
1167 1167 double mark_work_end = os::elapsedTime();
1168 1168
1169 1169 bool const mark_finished = !has_overflown();
1170 1170 if (mark_finished) {
1171 1171 weak_refs_work(false /* clear_all_soft_refs */);
1172 1172
1173 1173 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
1174 1174 // We're done with marking.
1175 1175 // This is the end of the marking cycle; we expect all
1176 1176 // threads to have SATB queues with active set to true.
1177 1177 satb_mq_set.set_active_all_threads(false, /* new active value */
1178 1178 true /* expected_active */);
1179 1179
1180 1180 {
1181 1181 GCTraceTime(Debug, gc, phases) debug("Flush Task Caches", _gc_timer_cm);
1182 1182 flush_all_task_caches();
1183 1183 }
1184 1184
1185 1185 // Install newly created mark bitmap as "prev".
1186 1186 swap_mark_bitmaps();
1187 1187 {
1188 1188 GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking Before Rebuild", _gc_timer_cm);
1189 1189
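// One worker for every RegionsPerThread regions, rounding up.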
1190 1190 uint const workers_by_capacity = (_g1h->num_regions() + G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread - 1) /
1191 1191 G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread;
1192 1192 uint const num_workers = MIN2(_g1h->workers()->active_workers(), workers_by_capacity);
1193 1193
1194 1194 G1UpdateRemSetTrackingBeforeRebuildTask cl(_g1h, this, num_workers);
1195 1195 log_debug(gc, ergo)("Running %s using %u workers for %u regions in heap", cl.name(), num_workers, _g1h->num_regions());
1196 1196 _g1h->workers()->run_task(&cl, num_workers);
1197 1197
1198 1198 log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
1199 1199 _g1h->num_regions(), cl.total_selected_for_rebuild());
1200 1200 }
1201 1201 {
1202 1202 GCTraceTime(Debug, gc, phases) debug("Reclaim Empty Regions", _gc_timer_cm);
1203 1203 reclaim_empty_regions();
1204 1204 }
1205 1205
1206 1206 // Clean out dead classes
1207 1207 if (ClassUnloadingWithConcurrentMark) {
1208 1208 GCTraceTime(Debug, gc, phases) debug("Purge Metaspace", _gc_timer_cm);
1209 1209 ClassLoaderDataGraph::purge();
1210 1210 }
1211 1211
1212 - _g1h->resize_heap_if_necessary();
1212 + _g1h->shrink_heap_after_concurrent_mark();
1213 1213
1214 1214 compute_new_sizes();
1215 1215
1216 1216 verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark after");
1217 1217
1218 1218 assert(!restart_for_overflow(), "sanity");
1219 1219 // Completely reset the marking state since marking completed
1220 1220 reset_at_marking_complete();
1221 1221 } else {
1222 1222 // We overflowed. Restart concurrent marking.
1223 1223 _restart_for_overflow = true;
1224 1224
1225 1225 verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");
1226 1226
1227 1227 // Clear the marking state because we will be restarting
1228 1228 // marking due to overflowing the global mark stack.
1229 1229 reset_marking_for_restart();
1230 1230 }
1231 1231
1232 1232 {
1233 1233 GCTraceTime(Debug, gc, phases) debug("Report Object Count", _gc_timer_cm);
1234 1234 report_object_count(mark_finished);
1235 1235 }
1236 1236
1237 1237 // Statistics
1238 1238 double now = os::elapsedTime();
1239 1239 _remark_mark_times.add((mark_work_end - start) * 1000.0);
1240 1240 _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1241 1241 _remark_times.add((now - start) * 1000.0);
1242 1242
1243 1243 policy->record_concurrent_mark_remark_end();
1244 1244 }
1245 1245
1246 1246 class G1ReclaimEmptyRegionsTask : public AbstractGangTask {
1247 1247 // Per-region work during the Cleanup pause.
1248 1248 class G1ReclaimEmptyRegionsClosure : public HeapRegionClosure {
1249 1249 G1CollectedHeap* _g1h;
1250 1250 size_t _freed_bytes;
1251 1251 FreeRegionList* _local_cleanup_list;
1252 1252 uint _old_regions_removed;
1253 1253 uint _humongous_regions_removed;
1254 1254
1255 1255 public:
1256 1256 G1ReclaimEmptyRegionsClosure(G1CollectedHeap* g1h,
1257 1257 FreeRegionList* local_cleanup_list) :
1258 1258 _g1h(g1h),
1259 1259 _freed_bytes(0),
1260 1260 _local_cleanup_list(local_cleanup_list),
1261 1261 _old_regions_removed(0),
1262 1262 _humongous_regions_removed(0) { }
1263 1263
1264 1264 size_t freed_bytes() { return _freed_bytes; }
1265 1265 const uint old_regions_removed() { return _old_regions_removed; }
1266 1266 const uint humongous_regions_removed() { return _humongous_regions_removed; }
1267 1267
1268 1268 bool do_heap_region(HeapRegion *hr) {
1269 1269 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
1270 1270 _freed_bytes += hr->used();
1271 1271 hr->set_containing_set(NULL);
1272 1272 if (hr->is_humongous()) {
1273 1273 _humongous_regions_removed++;
1274 1274 _g1h->free_humongous_region(hr, _local_cleanup_list);
1275 1275 } else {
1276 1276 _old_regions_removed++;
1277 1277 _g1h->free_region(hr, _local_cleanup_list);
1278 1278 }
1279 1279 hr->clear_cardtable();
1280 1280 _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
1281 1281 log_trace(gc)("Reclaimed empty region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
1282 1282 }
1283 1283
1284 1284 return false;
1285 1285 }
1286 1286 };
1287 1287
1288 1288 G1CollectedHeap* _g1h;
1289 1289 FreeRegionList* _cleanup_list;
1290 1290 HeapRegionClaimer _hrclaimer;
1291 1291
1292 1292 public:
1293 1293 G1ReclaimEmptyRegionsTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
1294 1294 AbstractGangTask("G1 Cleanup"),
1295 1295 _g1h(g1h),
1296 1296 _cleanup_list(cleanup_list),
1297 1297 _hrclaimer(n_workers) {
1298 1298 }
1299 1299
1300 1300 void work(uint worker_id) {
1301 1301 FreeRegionList local_cleanup_list("Local Cleanup List");
1302 1302 G1ReclaimEmptyRegionsClosure cl(_g1h, &local_cleanup_list);
1303 1303 _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
1304 1304 assert(cl.is_complete(), "Shouldn't have aborted!");
1305 1305
1306 1306 // Now update the old/humongous region sets
1307 1307 _g1h->remove_from_old_sets(cl.old_regions_removed(), cl.humongous_regions_removed());
1308 1308 {
1309 1309 MutexLocker x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1310 1310 _g1h->decrement_summary_bytes(cl.freed_bytes());
1311 1311
1312 1312 _cleanup_list->add_ordered(&local_cleanup_list);
1313 1313 assert(local_cleanup_list.is_empty(), "post-condition");
1314 1314 }
1315 1315 }
1316 1316 };
1317 1317
1318 1318 void G1ConcurrentMark::reclaim_empty_regions() {
1319 1319 WorkGang* workers = _g1h->workers();
1320 1320 FreeRegionList empty_regions_list("Empty Regions After Mark List");
1321 1321
1322 1322 G1ReclaimEmptyRegionsTask cl(_g1h, &empty_regions_list, workers->active_workers());
1323 1323 workers->run_task(&cl);
1324 1324
1325 1325 if (!empty_regions_list.is_empty()) {
1326 1326 log_debug(gc)("Reclaimed %u empty regions", empty_regions_list.length());
1327 1327 // Now print the empty regions list.
1328 1328 G1HRPrinter* hrp = _g1h->hr_printer();
1329 1329 if (hrp->is_active()) {
1330 1330 FreeRegionListIterator iter(&empty_regions_list);
1331 1331 while (iter.more_available()) {
1332 1332 HeapRegion* hr = iter.get_next();
1333 1333 hrp->cleanup(hr);
1334 1334 }
1335 1335 }
1336 1336 // And actually make them available.
1337 1337 _g1h->prepend_to_freelist(&empty_regions_list);
1338 1338 }
1339 1339 }
1340 1340
1341 1341 void G1ConcurrentMark::compute_new_sizes() {
1342 1342 MetaspaceGC::compute_new_size();
1343 1343
1344 1344 // Cleanup will have freed any regions completely full of garbage.
1345 1345 // Update the soft reference policy with the new heap occupancy.
1346 1346 Universe::update_heap_info_at_gc();
1347 1347
1348 1348 // We reclaimed old regions so we should calculate the sizes to make
1349 1349 // sure we update the old gen/space data.
1350 1350 _g1h->g1mm()->update_sizes();
1351 1351 }
1352 1352
1353 1353 void G1ConcurrentMark::cleanup() {
1354 1354 assert_at_safepoint_on_vm_thread();
1355 1355
1356 1356 // If a full collection has happened, we shouldn't do this.
1357 1357 if (has_aborted()) {
1358 1358 return;
1359 1359 }
1360 1360
1361 1361 G1Policy* policy = _g1h->policy();
1362 1362 policy->record_concurrent_mark_cleanup_start();
1363 1363
1364 1364 double start = os::elapsedTime();
1365 1365
1366 1366 verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup before");
1367 1367
1368 1368 {
1369 1369 GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking After Rebuild", _gc_timer_cm);
1370 1370 G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
1371 1371 _g1h->heap_region_iterate(&cl);
1372 1372 }
1373 1373
1374 1374 if (log_is_enabled(Trace, gc, liveness)) {
1375 1375 G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
1376 1376 _g1h->heap_region_iterate(&cl);
1377 1377 }
1378 1378
1379 1379 verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup after");
1380 1380
1381 1381 // We need to make this a "collection" so any collection pause that
1382 1382 // races with it goes around and waits for Cleanup to finish.
1383 1383 _g1h->increment_total_collections();
1384 1384
1385 1385 // Local statistics
1386 1386 double recent_cleanup_time = (os::elapsedTime() - start);
1387 1387 _total_cleanup_time += recent_cleanup_time;
1388 1388 _cleanup_times.add(recent_cleanup_time);
1389 1389
1390 1390 {
1391 1391 GCTraceTime(Debug, gc, phases) debug("Finalize Concurrent Mark Cleanup", _gc_timer_cm);
1392 1392 policy->record_concurrent_mark_cleanup_end();
1393 1393 }
1394 1394 }
1395 1395
1396 1396 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
1397 1397 // Uses the G1CMTask associated with a worker thread (for serial reference
1398 1398 // processing the G1CMTask for worker 0 is used) to preserve (mark) and
1399 1399 // trace referent objects.
1400 1400 //
1401 1401 // Using the G1CMTask and embedded local queues avoids having the worker
1402 1402 // threads operate on the global mark stack. This reduces the risk
1403 1403 // of overflowing the stack - which we would rather avoid at this late
1404 1404 // stage. Using the tasks' local queues also removes the potential
1405 1405 // for the workers to interfere with each other that would exist if they
1406 1406 // operated on the global stack.
1407 1407
1408 1408 class G1CMKeepAliveAndDrainClosure : public OopClosure {
1409 1409 G1ConcurrentMark* _cm;
1410 1410 G1CMTask* _task;
1411 1411 uint _ref_counter_limit;
1412 1412 uint _ref_counter;
1413 1413 bool _is_serial;
1414 1414 public:
1415 1415 G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
1416 1416 _cm(cm), _task(task), _ref_counter_limit(G1RefProcDrainInterval),
1417 1417 _ref_counter(_ref_counter_limit), _is_serial(is_serial) {
1418 1418 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
1419 1419 }
1420 1420
1421 1421 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
1422 1422 virtual void do_oop( oop* p) { do_oop_work(p); }
1423 1423
1424 1424 template <class T> void do_oop_work(T* p) {
1425 1425 if (_cm->has_overflown()) {
1426 1426 return;
1427 1427 }
1428 1428 if (!_task->deal_with_reference(p)) {
1429 1429 // We did not add anything to the mark bitmap (or mark stack), so there is
1430 1430 // no point trying to drain it.
1431 1431 return;
1432 1432 }
1433 1433 _ref_counter--;
1434 1434
1435 1435 if (_ref_counter == 0) {
1436 1436 // We have dealt with _ref_counter_limit references, pushing them
1437 1437 // and objects reachable from them on to the local stack (and
1438 1438 // possibly the global stack). Call G1CMTask::do_marking_step() to
1439 1439 // process these entries.
1440 1440 //
1441 1441 // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
1442 1442 // there's nothing more to do (i.e. we're done with the entries that
1443 1443 // were pushed as a result of the G1CMTask::deal_with_reference() calls
1444 1444 // above) or we overflow.
1445 1445 //
1446 1446 // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
1447 1447 // flag while there may still be some work to do. (See the comment at
1448 1448 // the beginning of G1CMTask::do_marking_step() for those conditions -
1449 1449 // one of which is reaching the specified time target.) It is only
1450 1450 // when G1CMTask::do_marking_step() returns without setting the
1451 1451 // has_aborted() flag that the marking step has completed.
1452 1452 do {
1453 1453 double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
1454 1454 _task->do_marking_step(mark_step_duration_ms,
1455 1455 false /* do_termination */,
1456 1456 _is_serial);
1457 1457 } while (_task->has_aborted() && !_cm->has_overflown());
1458 1458 _ref_counter = _ref_counter_limit;
1459 1459 }
1460 1460 }
1461 1461 };
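// For illustration only (a sketch, assuming a drain interval of 1000 --
// the actual value comes from G1RefProcDrainInterval): the reference
// processor applies do_oop() above to each discovered reference, and
// after every 1000th reference that actually marked something the
// closure pauses to drain the local queue through do_marking_step(),
// which bounds queue growth between drains.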
1462 1462
1463 1463 // 'Drain' oop closure used by both serial and parallel reference processing.
1464 1464 // Uses the G1CMTask associated with a given worker thread (for serial
1465 1465 // reference processing the G1CMTask for worker 0 is used). Calls the
1466 1466 // do_marking_step routine, with an unbelievably large timeout value,
1467 1467 // to drain the marking data structures of the remaining entries
1468 1468 // added by the 'keep alive' oop closure above.
1469 1469
1470 1470 class G1CMDrainMarkingStackClosure : public VoidClosure {
1471 1471 G1ConcurrentMark* _cm;
1472 1472 G1CMTask* _task;
1473 1473 bool _is_serial;
1474 1474 public:
1475 1475 G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
1476 1476 _cm(cm), _task(task), _is_serial(is_serial) {
1477 1477 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
1478 1478 }
1479 1479
1480 1480 void do_void() {
1481 1481 do {
1482 1482 // We call G1CMTask::do_marking_step() to completely drain the local
1483 1483 // and global marking stacks of entries pushed by the 'keep alive'
1484 1484 // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
1485 1485 //
1486 1486 // G1CMTask::do_marking_step() is called in a loop, which we'll exit
1487 1487 // if there's nothing more to do (i.e. we've completely drained the
1488 1488 // entries that were pushed as a result of applying the 'keep alive'
1489 1489 // closure to the entries on the discovered ref lists) or we overflow
1490 1490 // the global marking stack.
1491 1491 //
1492 1492 // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
1493 1493 // flag while there may still be some work to do. (See the comment at
1494 1494 // the beginning of G1CMTask::do_marking_step() for those conditions -
1495 1495 // one of which is reaching the specified time target.) It is only
1496 1496 // when G1CMTask::do_marking_step() returns without setting the
1497 1497 // has_aborted() flag that the marking step has completed.
1498 1498
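      // 1000000000.0 ms is on the order of 11.5 days, i.e. effectively
      // unbounded: this call only returns once the marking stacks are
      // completely drained or the global mark stack has overflown.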
1499 1499 _task->do_marking_step(1000000000.0 /* something very large */,
1500 1500 true /* do_termination */,
1501 1501 _is_serial);
1502 1502 } while (_task->has_aborted() && !_cm->has_overflown());
1503 1503 }
1504 1504 };
1505 1505
1506 1506 // Implementation of AbstractRefProcTaskExecutor for parallel
1507 1507 // reference processing at the end of G1 concurrent marking
1508 1508
1509 1509 class G1CMRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
1510 1510 private:
1511 1511 G1CollectedHeap* _g1h;
1512 1512 G1ConcurrentMark* _cm;
1513 1513 WorkGang* _workers;
1514 1514 uint _active_workers;
1515 1515
1516 1516 public:
1517 1517 G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
1518 1518 G1ConcurrentMark* cm,
1519 1519 WorkGang* workers,
1520 1520 uint n_workers) :
1521 1521 _g1h(g1h), _cm(cm),
1522 1522 _workers(workers), _active_workers(n_workers) { }
1523 1523
1524 1524 virtual void execute(ProcessTask& task, uint ergo_workers);
1525 1525 };
1526 1526
1527 1527 class G1CMRefProcTaskProxy : public AbstractGangTask {
1528 1528 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
1529 1529 ProcessTask& _proc_task;
1530 1530 G1CollectedHeap* _g1h;
1531 1531 G1ConcurrentMark* _cm;
1532 1532
1533 1533 public:
1534 1534 G1CMRefProcTaskProxy(ProcessTask& proc_task,
1535 1535 G1CollectedHeap* g1h,
1536 1536 G1ConcurrentMark* cm) :
1537 1537 AbstractGangTask("Process reference objects in parallel"),
1538 1538 _proc_task(proc_task), _g1h(g1h), _cm(cm) {
1539 1539 ReferenceProcessor* rp = _g1h->ref_processor_cm();
1540 1540 assert(rp->processing_is_mt(), "shouldn't be here otherwise");
1541 1541 }
1542 1542
1543 1543 virtual void work(uint worker_id) {
1544 1544 ResourceMark rm;
1545 1545 HandleMark hm;
1546 1546 G1CMTask* task = _cm->task(worker_id);
1547 1547 G1CMIsAliveClosure g1_is_alive(_g1h);
1548 1548 G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
1549 1549 G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
1550 1550
1551 1551 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
1552 1552 }
1553 1553 };
1554 1554
1555 1555 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task, uint ergo_workers) {
1556 1556 assert(_workers != NULL, "Need parallel worker threads.");
1557 1557 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
1558 1558 assert(_workers->active_workers() >= ergo_workers,
1559 1559 "Ergonomically chosen workers(%u) should be less than or equal to active workers(%u)",
1560 1560 ergo_workers, _workers->active_workers());
1561 1561
1562 1562 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
1563 1563
1564 1564 // We need to reset the concurrency level before each
1565 1565 // proxy task execution, so that the termination protocol
1566 1566 // and overflow handling in G1CMTask::do_marking_step() knows
1567 1567 // how many workers to wait for.
1568 1568 _cm->set_concurrency(ergo_workers);
1569 1569 _workers->run_task(&proc_task_proxy, ergo_workers);
1570 1570 }
1571 1571
1572 1572 void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) {
1573 1573 ResourceMark rm;
1574 1574 HandleMark hm;
1575 1575
1576 1576 // Is alive closure.
1577 1577 G1CMIsAliveClosure g1_is_alive(_g1h);
1578 1578
1579 1579 // Inner scope to exclude the cleaning of the string table
1580 1580 // from the displayed time.
1581 1581 {
1582 1582 GCTraceTime(Debug, gc, phases) debug("Reference Processing", _gc_timer_cm);
1583 1583
1584 1584 ReferenceProcessor* rp = _g1h->ref_processor_cm();
1585 1585
1586 1586 // See the comment in G1CollectedHeap::ref_processing_init()
1587 1587 // about how reference processing currently works in G1.
1588 1588
1589 1589 // Set the soft reference policy
1590 1590 rp->setup_policy(clear_all_soft_refs);
1591 1591 assert(_global_mark_stack.is_empty(), "mark stack should be empty");
1592 1592
1593 1593 // Instances of the 'Keep Alive' and 'Complete GC' closures used
1594 1594 // in serial reference processing. Note these closures are also
1595 1595 // used for serially processing (by the current thread) the
1596 1596 // JNI references during parallel reference processing.
1597 1597 //
1598 1598 // These closures do not need to synchronize with the worker
1599 1599 // threads involved in parallel reference processing as these
1600 1600 // instances are executed serially by the current thread (i.e.
1601 1601 // reference processing is not multi-threaded and is thus
1602 1602 // performed by the current thread instead of a gang worker).
1603 1603 //
1604 1604 // The gang tasks involved in parallel reference processing create
1605 1605 // their own instances of these closures, which do their own
1606 1606 // synchronization among themselves.
1607 1607 G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
1608 1608 G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
1609 1609
1610 1610 // We need at least one active thread. If reference processing
1611 1611 // is not multi-threaded we use the current (VMThread) thread,
1612 1612 // otherwise we use the work gang from the G1CollectedHeap and
1613 1613 // we utilize all the worker threads we can.
1614 1614 bool processing_is_mt = rp->processing_is_mt();
1615 1615 uint active_workers = (processing_is_mt ? _g1h->workers()->active_workers() : 1U);
1616 1616 active_workers = clamp(active_workers, 1u, _max_num_tasks);
1617 1617
1618 1618 // Parallel processing task executor.
1619 1619 G1CMRefProcTaskExecutor par_task_executor(_g1h, this,
1620 1620 _g1h->workers(), active_workers);
1621 1621 AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
1622 1622
1623 1623 // Set the concurrency level. The phase was already set prior to
1624 1624 // executing the remark task.
1625 1625 set_concurrency(active_workers);
1626 1626
1627 1627 // Set the degree of MT processing here. If the discovery was done MT,
1628 1628 // the number of threads involved during discovery could differ from
1629 1629 // the number of active workers. This is OK as long as the discovered
1630 1630 // Reference lists are balanced (see balance_all_queues() and balance_queues()).
1631 1631 rp->set_active_mt_degree(active_workers);
1632 1632
1633 1633 ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->max_num_queues());
1634 1634
1635 1635 // Process the weak references.
1636 1636 const ReferenceProcessorStats& stats =
1637 1637 rp->process_discovered_references(&g1_is_alive,
1638 1638 &g1_keep_alive,
1639 1639 &g1_drain_mark_stack,
1640 1640 executor,
1641 1641 &pt);
1642 1642 _gc_tracer_cm->report_gc_reference_stats(stats);
1643 1643 pt.print_all_references();
1644 1644
1645 1645 // The do_oop work routines of the keep_alive and drain_marking_stack
1646 1646 // oop closures will set the has_overflown flag if we overflow the
1647 1647 // global marking stack.
1648 1648
1649 1649 assert(has_overflown() || _global_mark_stack.is_empty(),
1650 1650 "Mark stack should be empty (unless it has overflown)");
1651 1651
1652 1652 assert(rp->num_queues() == active_workers, "why not");
1653 1653
1654 1654 rp->verify_no_references_recorded();
1655 1655 assert(!rp->discovery_enabled(), "Post condition");
1656 1656 }
1657 1657
1658 1658 if (has_overflown()) {
1659 1659 // We cannot trust g1_is_alive and the contents of the heap if the marking stack
1660 1660 // overflowed while processing references. Exit the VM.
1661 1661 fatal("Overflow during reference processing, can not continue. Please "
1662 1662 "increase MarkStackSizeMax (current value: " SIZE_FORMAT ") and "
1663 1663 "restart.", MarkStackSizeMax);
1664 1664 return;
1665 1665 }
1666 1666
1667 1667 assert(_global_mark_stack.is_empty(), "Marking should have completed");
1668 1668
1669 1669 {
1670 1670 GCTraceTime(Debug, gc, phases) debug("Weak Processing", _gc_timer_cm);
1671 1671 WeakProcessor::weak_oops_do(_g1h->workers(), &g1_is_alive, &do_nothing_cl, 1);
1672 1672 }
1673 1673
1674 1674 // Unload Klasses, String, Code Cache, etc.
1675 1675 if (ClassUnloadingWithConcurrentMark) {
1676 1676 GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
1677 1677 bool purged_classes = SystemDictionary::do_unloading(_gc_timer_cm);
1678 1678 _g1h->complete_cleaning(&g1_is_alive, purged_classes);
1679 1679 } else if (StringDedup::is_enabled()) {
1680 1680 GCTraceTime(Debug, gc, phases) debug("String Deduplication", _gc_timer_cm);
1681 1681 _g1h->string_dedup_cleaning(&g1_is_alive, NULL);
1682 1682 }
1683 1683 }
1684 1684
1685 1685 class G1PrecleanYieldClosure : public YieldClosure {
1686 1686 G1ConcurrentMark* _cm;
1687 1687
1688 1688 public:
1689 1689 G1PrecleanYieldClosure(G1ConcurrentMark* cm) : _cm(cm) { }
1690 1690
1691 1691 virtual bool should_return() {
1692 1692 return _cm->has_aborted();
1693 1693 }
1694 1694
1695 1695 virtual bool should_return_fine_grain() {
1696 1696 _cm->do_yield_check();
1697 1697 return _cm->has_aborted();
1698 1698 }
1699 1699 };
1700 1700
1701 1701 void G1ConcurrentMark::preclean() {
1702 1702 assert(G1UseReferencePrecleaning, "Precleaning must be enabled.");
1703 1703
1704 1704 SuspendibleThreadSetJoiner joiner;
1705 1705
1706 1706 G1CMKeepAliveAndDrainClosure keep_alive(this, task(0), true /* is_serial */);
1707 1707 G1CMDrainMarkingStackClosure drain_mark_stack(this, task(0), true /* is_serial */);
1708 1708
1709 1709 set_concurrency_and_phase(1, true);
1710 1710
1711 1711 G1PrecleanYieldClosure yield_cl(this);
1712 1712
1713 1713 ReferenceProcessor* rp = _g1h->ref_processor_cm();
1714 1714 // Precleaning is single threaded. Temporarily disable MT discovery.
1715 1715 ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
1716 1716 rp->preclean_discovered_references(rp->is_alive_non_header(),
1717 1717 &keep_alive,
1718 1718 &drain_mark_stack,
1719 1719 &yield_cl,
1720 1720 _gc_timer_cm);
1721 1721 }
1722 1722
1723 1723 // When sampling object counts, we already swapped the mark bitmaps, so we need to use
1724 1724 // the prev bitmap to determine liveness.
1725 1725 class G1ObjectCountIsAliveClosure: public BoolObjectClosure {
1726 1726 G1CollectedHeap* _g1h;
1727 1727 public:
1728 1728 G1ObjectCountIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) { }
1729 1729
1730 1730 bool do_object_b(oop obj) {
1731 1731 HeapWord* addr = (HeapWord*)obj;
1732 1732 return addr != NULL &&
1733 1733 (!_g1h->is_in_g1_reserved(addr) || !_g1h->is_obj_dead(obj));
1734 1734 }
1735 1735 };
1736 1736
1737 1737 void G1ConcurrentMark::report_object_count(bool mark_completed) {
1738 1738 // Depending on whether the marking completed, liveness needs to be determined
1739 1739 // using either the next or the prev bitmap.
1740 1740 if (mark_completed) {
1741 1741 G1ObjectCountIsAliveClosure is_alive(_g1h);
1742 1742 _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1743 1743 } else {
1744 1744 G1CMIsAliveClosure is_alive(_g1h);
1745 1745 _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1746 1746 }
1747 1747 }
1748 1748
1749 1749
1750 1750 void G1ConcurrentMark::swap_mark_bitmaps() {
1751 1751 G1CMBitMap* temp = _prev_mark_bitmap;
1752 1752 _prev_mark_bitmap = _next_mark_bitmap;
1753 1753 _next_mark_bitmap = temp;
1754 1754 _g1h->collector_state()->set_clearing_next_bitmap(true);
1755 1755 }
1756 1756
1757 1757 // Closure for marking entries in SATB buffers.
1758 1758 class G1CMSATBBufferClosure : public SATBBufferClosure {
1759 1759 private:
1760 1760 G1CMTask* _task;
1761 1761 G1CollectedHeap* _g1h;
1762 1762
1763 1763 // This is very similar to G1CMTask::deal_with_reference, but with
1764 1764 // more relaxed requirements for the argument, so this must be more
1765 1765 // circumspect about treating the argument as an object.
1766 1766 void do_entry(void* entry) const {
1767 1767 _task->increment_refs_reached();
1768 1768 oop const obj = static_cast<oop>(entry);
1769 1769 _task->make_reference_grey(obj);
1770 1770 }
1771 1771
1772 1772 public:
1773 1773 G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h)
1774 1774 : _task(task), _g1h(g1h) { }
1775 1775
1776 1776 virtual void do_buffer(void** buffer, size_t size) {
1777 1777 for (size_t i = 0; i < size; ++i) {
1778 1778 do_entry(buffer[i]);
1779 1779 }
1780 1780 }
1781 1781 };
1782 1782
1783 1783 class G1RemarkThreadsClosure : public ThreadClosure {
1784 1784 G1CMSATBBufferClosure _cm_satb_cl;
1785 1785 G1CMOopClosure _cm_cl;
1786 1786 MarkingCodeBlobClosure _code_cl;
1787 1787 uintx _claim_token;
1788 1788
1789 1789 public:
1790 1790 G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
1791 1791 _cm_satb_cl(task, g1h),
1792 1792 _cm_cl(g1h, task),
1793 1793 _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
1794 1794 _claim_token(Threads::thread_claim_token()) {}
1795 1795
1796 1796 void do_thread(Thread* thread) {
1797 1797 if (thread->claim_threads_do(true, _claim_token)) {
1798 1798 SATBMarkQueue& queue = G1ThreadLocalData::satb_mark_queue(thread);
1799 1799 queue.apply_closure_and_empty(&_cm_satb_cl);
1800 1800 if (thread->is_Java_thread()) {
1801 1801 // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
1802 1802 // however, oops reachable from nmethods have very complex lifecycles:
1803 1803 // * Alive if on the stack of an executing method
1804 1804 // * Weakly reachable otherwise
1805 1805 // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
1806 1806 // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
1807 1807 JavaThread* jt = (JavaThread*)thread;
1808 1808 jt->nmethods_do(&_code_cl);
1809 1809 }
1810 1810 }
1811 1811 }
1812 1812 };
1813 1813
1814 1814 class G1CMRemarkTask : public AbstractGangTask {
1815 1815 G1ConcurrentMark* _cm;
1816 1816 public:
1817 1817 void work(uint worker_id) {
1818 1818 G1CMTask* task = _cm->task(worker_id);
1819 1819 task->record_start_time();
1820 1820 {
1821 1821 ResourceMark rm;
1822 1822 HandleMark hm;
1823 1823
1824 1824 G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
1825 1825 Threads::threads_do(&threads_f);
1826 1826 }
1827 1827
1828 1828 do {
1829 1829 task->do_marking_step(1000000000.0 /* something very large */,
1830 1830 true /* do_termination */,
1831 1831 false /* is_serial */);
1832 1832 } while (task->has_aborted() && !_cm->has_overflown());
1833 1833 // If we overflow, then we do not want to restart. We instead
1834 1834 // want to abort remark and do concurrent marking again.
1835 1835 task->record_end_time();
1836 1836 }
1837 1837
1838 1838 G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :
1839 1839 AbstractGangTask("Par Remark"), _cm(cm) {
1840 1840 _cm->terminator()->reset_for_reuse(active_workers);
1841 1841 }
1842 1842 };
1843 1843
1844 1844 void G1ConcurrentMark::finalize_marking() {
1845 1845 ResourceMark rm;
1846 1846 HandleMark hm;
1847 1847
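  // Make sure the heap is parsable, i.e. fill in (but do not retire)
  // any outstanding TLABs, before the remark work below.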
1848 1848 _g1h->ensure_parsability(false);
1849 1849
1850 1850 // This is remark, so we'll use all active threads.
1851 1851 uint active_workers = _g1h->workers()->active_workers();
1852 1852 set_concurrency_and_phase(active_workers, false /* concurrent */);
1853 1853 // Leave _parallel_marking_threads at its
1854 1854 // value originally calculated in the G1ConcurrentMark
1855 1855 // constructor and pass values of the active workers
1856 1856 // through the gang in the task.
1857 1857
1858 1858 {
1859 1859 StrongRootsScope srs(active_workers);
1860 1860
1861 1861 G1CMRemarkTask remarkTask(this, active_workers);
1862 1862 // We will start all available threads, even if we decide that the
1863 1863 // active_workers will be fewer. The extra ones will just bail out
1864 1864 // immediately.
1865 1865 _g1h->workers()->run_task(&remarkTask);
1866 1866 }
1867 1867
1868 1868 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
1869 1869 guarantee(has_overflown() ||
1870 1870 satb_mq_set.completed_buffers_num() == 0,
1871 1871 "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
1872 1872 BOOL_TO_STR(has_overflown()),
1873 1873 satb_mq_set.completed_buffers_num());
1874 1874
1875 1875 print_stats();
1876 1876 }
1877 1877
1878 1878 void G1ConcurrentMark::flush_all_task_caches() {
1879 1879 size_t hits = 0;
1880 1880 size_t misses = 0;
1881 1881 for (uint i = 0; i < _max_num_tasks; i++) {
1882 1882 Pair<size_t, size_t> stats = _tasks[i]->flush_mark_stats_cache();
1883 1883 hits += stats.first;
1884 1884 misses += stats.second;
1885 1885 }
1886 1886 size_t sum = hits + misses;
1887 1887 log_debug(gc, stats)("Mark stats cache hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %1.3lf",
1888 1888 hits, misses, percent_of(hits, sum));
1889 1889 }
1890 1890
1891 1891 void G1ConcurrentMark::clear_range_in_prev_bitmap(MemRegion mr) {
1892 1892 _prev_mark_bitmap->clear_range(mr);
1893 1893 }
1894 1894
1895 1895 HeapRegion*
1896 1896 G1ConcurrentMark::claim_region(uint worker_id) {
1897 1897 // "checkpoint" the finger
1898 1898 HeapWord* finger = _finger;
1899 1899
1900 1900 while (finger < _heap.end()) {
1901 1901 assert(_g1h->is_in_g1_reserved(finger), "invariant");
1902 1902
1903 1903 HeapRegion* curr_region = _g1h->heap_region_containing(finger);
1904 1904 // Make sure that the reads below do not float before loading curr_region.
1905 1905 OrderAccess::loadload();
1906 1906 // The above heap_region_containing() may return NULL as we always scan and claim
1907 1907 // regions until the end of the heap. In this case, just jump to the next region.
1908 1908 HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
1909 1909
1910 1910 // Is the gap between reading the finger and doing the CAS too long?
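    // E.g. several workers may read the same finger value; the CAS below
    // lets at most one of them advance _finger from finger to end and
    // claim that range, while the others see res != finger, re-read
    // _finger and retry.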
1911 1911 HeapWord* res = Atomic::cmpxchg(&_finger, finger, end);
1912 1912 if (res == finger && curr_region != NULL) {
1913 1913 // we succeeded
1914 1914 HeapWord* bottom = curr_region->bottom();
1915 1915 HeapWord* limit = curr_region->next_top_at_mark_start();
1916 1916
1917 1917 // Notice that _finger == end cannot be guaranteed here since
1918 1918 // someone else might have moved the finger even further.
1919 1919 assert(_finger >= end, "the finger should have moved forward");
1920 1920
1921 1921 if (limit > bottom) {
1922 1922 return curr_region;
1923 1923 } else {
1924 1924 assert(limit == bottom,
1925 1925 "the region limit should be at bottom");
1926 1926 // we return NULL and the caller should try calling
1927 1927 // claim_region() again.
1928 1928 return NULL;
1929 1929 }
1930 1930 } else {
1931 1931 assert(_finger > finger, "the finger should have moved forward");
1932 1932 // read it again
1933 1933 finger = _finger;
1934 1934 }
1935 1935 }
1936 1936
1937 1937 return NULL;
1938 1938 }
1939 1939
1940 1940 #ifndef PRODUCT
1941 1941 class VerifyNoCSetOops {
1942 1942 G1CollectedHeap* _g1h;
1943 1943 const char* _phase;
1944 1944 int _info;
1945 1945
1946 1946 public:
1947 1947 VerifyNoCSetOops(const char* phase, int info = -1) :
1948 1948 _g1h(G1CollectedHeap::heap()),
1949 1949 _phase(phase),
1950 1950 _info(info)
1951 1951 { }
1952 1952
1953 1953 void operator()(G1TaskQueueEntry task_entry) const {
1954 1954 if (task_entry.is_array_slice()) {
1955 1955 guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice()));
1956 1956 return;
1957 1957 }
1958 1958 guarantee(oopDesc::is_oop(task_entry.obj()),
1959 1959 "Non-oop " PTR_FORMAT ", phase: %s, info: %d",
1960 1960 p2i(task_entry.obj()), _phase, _info);
1961 1961 HeapRegion* r = _g1h->heap_region_containing(task_entry.obj());
1962 1962 guarantee(!(r->in_collection_set() || r->has_index_in_opt_cset()),
1963 1963 "obj " PTR_FORMAT " from %s (%d) in region %u in (optional) collection set",
1964 1964 p2i(task_entry.obj()), _phase, _info, r->hrm_index());
1965 1965 }
1966 1966 };
1967 1967
1968 1968 void G1ConcurrentMark::verify_no_collection_set_oops() {
1969 1969 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
1970 1970 if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
1971 1971 return;
1972 1972 }
1973 1973
1974 1974 // Verify entries on the global mark stack
1975 1975 _global_mark_stack.iterate(VerifyNoCSetOops("Stack"));
1976 1976
1977 1977 // Verify entries on the task queues
1978 1978 for (uint i = 0; i < _max_num_tasks; ++i) {
1979 1979 G1CMTaskQueue* queue = _task_queues->queue(i);
1980 1980 queue->iterate(VerifyNoCSetOops("Queue", i));
1981 1981 }
1982 1982
1983 1983 // Verify the global finger
1984 1984 HeapWord* global_finger = finger();
1985 1985 if (global_finger != NULL && global_finger < _heap.end()) {
1986 1986 // Since we always iterate over all regions, we might get a NULL HeapRegion
1987 1987 // here.
1988 1988 HeapRegion* global_hr = _g1h->heap_region_containing(global_finger);
1989 1989 guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
1990 1990 "global finger: " PTR_FORMAT " region: " HR_FORMAT,
1991 1991 p2i(global_finger), HR_FORMAT_PARAMS(global_hr));
1992 1992 }
1993 1993
1994 1994 // Verify the task fingers
1995 1995 assert(_num_concurrent_workers <= _max_num_tasks, "sanity");
1996 1996 for (uint i = 0; i < _num_concurrent_workers; ++i) {
1997 1997 G1CMTask* task = _tasks[i];
1998 1998 HeapWord* task_finger = task->finger();
1999 1999 if (task_finger != NULL && task_finger < _heap.end()) {
2000 2000 // See above note on the global finger verification.
2001 2001 HeapRegion* r = _g1h->heap_region_containing(task_finger);
2002 2002 guarantee(r == NULL || task_finger == r->bottom() ||
2003 2003 !r->in_collection_set() || !r->has_index_in_opt_cset(),
2004 2004 "task finger: " PTR_FORMAT " region: " HR_FORMAT,
2005 2005 p2i(task_finger), HR_FORMAT_PARAMS(r));
2006 2006 }
2007 2007 }
2008 2008 }
2009 2009 #endif // PRODUCT
2010 2010
2011 2011 void G1ConcurrentMark::rebuild_rem_set_concurrently() {
2012 2012 _g1h->rem_set()->rebuild_rem_set(this, _concurrent_workers, _worker_id_offset);
2013 2013 }
2014 2014
2015 2015 void G1ConcurrentMark::print_stats() {
2016 2016 if (!log_is_enabled(Debug, gc, stats)) {
2017 2017 return;
2018 2018 }
2019 2019 log_debug(gc, stats)("---------------------------------------------------------------------");
2020 2020 for (size_t i = 0; i < _num_active_tasks; ++i) {
2021 2021 _tasks[i]->print_stats();
2022 2022 log_debug(gc, stats)("---------------------------------------------------------------------");
2023 2023 }
2024 2024 }
2025 2025
2026 2026 void G1ConcurrentMark::concurrent_cycle_abort() {
2027 2027 if (!cm_thread()->during_cycle() || _has_aborted) {
2028 2028 // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
2029 2029 return;
2030 2030 }
2031 2031
2032 2032 // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
2033 2033 // concurrent bitmap clearing.
2034 2034 {
2035 2035 GCTraceTime(Debug, gc) debug("Clear Next Bitmap");
2036 2036 clear_bitmap(_next_mark_bitmap, _g1h->workers(), false);
2037 2037 }
2038 2038 // Note we cannot clear the previous marking bitmap here
2039 2039 // since VerifyDuringGC verifies the objects marked during
2040 2040 // a full GC against the previous bitmap.
2041 2041
2042 2042 // Empty mark stack
2043 2043 reset_marking_for_restart();
2044 2044 for (uint i = 0; i < _max_num_tasks; ++i) {
2045 2045 _tasks[i]->clear_region_fields();
2046 2046 }
2047 2047 _first_overflow_barrier_sync.abort();
2048 2048 _second_overflow_barrier_sync.abort();
2049 2049 _has_aborted = true;
2050 2050
2051 2051 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2052 2052 satb_mq_set.abandon_partial_marking();
2053 2053 // This can be called either during or outside marking; we'll read
2054 2054 // the expected_active value from the SATB queue set.
2055 2055 satb_mq_set.set_active_all_threads(
2056 2056 false, /* new active value */
2057 2057 satb_mq_set.is_active() /* expected_active */);
2058 2058 }
2059 2059
2060 2060 static void print_ms_time_info(const char* prefix, const char* name,
2061 2061 NumberSeq& ns) {
2062 2062 log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
2063 2063 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
2064 2064 if (ns.num() > 0) {
2065 2065 log_trace(gc, marking)("%s [std. dev = %8.2f ms, max = %8.2f ms]",
2066 2066 prefix, ns.sd(), ns.maximum());
2067 2067 }
2068 2068 }
2069 2069
2070 2070 void G1ConcurrentMark::print_summary_info() {
2071 2071 Log(gc, marking) log;
2072 2072 if (!log.is_trace()) {
2073 2073 return;
2074 2074 }
2075 2075
2076 2076 log.trace(" Concurrent marking:");
2077 2077 print_ms_time_info(" ", "init marks", _init_times);
2078 2078 print_ms_time_info(" ", "remarks", _remark_times);
2079 2079 {
2080 2080 print_ms_time_info(" ", "final marks", _remark_mark_times);
2081 2081 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times);
2082 2082
2083 2083 }
2084 2084 print_ms_time_info(" ", "cleanups", _cleanup_times);
2085 2085 log.trace(" Finalize live data total time = %8.2f s (avg = %8.2f ms).",
2086 2086 _total_cleanup_time, (_cleanup_times.num() > 0 ? _total_cleanup_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
2087 2087 log.trace(" Total stop_world time = %8.2f s.",
2088 2088 (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0);
2089 2089 log.trace(" Total concurrent time = %8.2f s (%8.2f s marking).",
2090 2090 cm_thread()->vtime_accum(), cm_thread()->vtime_mark_accum());
2091 2091 }
2092 2092
2093 2093 void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const {
2094 2094 _concurrent_workers->print_worker_threads_on(st);
2095 2095 }
2096 2096
2097 2097 void G1ConcurrentMark::threads_do(ThreadClosure* tc) const {
2098 2098 _concurrent_workers->threads_do(tc);
2099 2099 }
2100 2100
2101 2101 void G1ConcurrentMark::print_on_error(outputStream* st) const {
2102 2102 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
2103 2103 p2i(_prev_mark_bitmap), p2i(_next_mark_bitmap));
2104 2104 _prev_mark_bitmap->print_on_error(st, " Prev Bits: ");
2105 2105 _next_mark_bitmap->print_on_error(st, " Next Bits: ");
2106 2106 }
2107 2107
2108 2108 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
2109 2109 ReferenceProcessor* result = g1h->ref_processor_cm();
2110 2110 assert(result != NULL, "CM reference processor should not be NULL");
2111 2111 return result;
2112 2112 }
2113 2113
2114 2114 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
2115 2115 G1CMTask* task)
2116 2116 : MetadataVisitingOopIterateClosure(get_cm_oop_closure_ref_processor(g1h)),
2117 2117 _g1h(g1h), _task(task)
2118 2118 { }
2119 2119
2120 2120 void G1CMTask::setup_for_region(HeapRegion* hr) {
2121 2121 assert(hr != NULL,
2122 2122 "claim_region() should have filtered out NULL regions");
2123 2123 _curr_region = hr;
2124 2124 _finger = hr->bottom();
2125 2125 update_region_limit();
2126 2126 }
2127 2127
2128 2128 void G1CMTask::update_region_limit() {
2129 2129 HeapRegion* hr = _curr_region;
2130 2130 HeapWord* bottom = hr->bottom();
2131 2131 HeapWord* limit = hr->next_top_at_mark_start();
2132 2132
2133 2133 if (limit == bottom) {
2134 2134 // The region was collected underneath our feet.
2135 2135 // We set the finger to bottom to ensure that the bitmap
2136 2136 // iteration that will follow this will not do anything.
2137 2137 // (this is not a condition that holds when we set the region up,
2138 2138 // as the region is not supposed to be empty in the first place)
2139 2139 _finger = bottom;
2140 2140 } else if (limit >= _region_limit) {
2141 2141 assert(limit >= _finger, "peace of mind");
2142 2142 } else {
2143 2143 assert(limit < _region_limit, "only way to get here");
2144 2144 // This can happen under some pretty unusual circumstances. An
2145 2145 // evacuation pause empties the region underneath our feet (NTAMS
2146 2146 // at bottom). We then do some allocation in the region (NTAMS
2147 2147 // stays at bottom), followed by the region being used as a GC
2148 2148 // alloc region (NTAMS will move to top() and the objects
2149 2149 // originally below it will be grayed). All objects now marked in
2150 2150 // the region are explicitly grayed, if below the global finger,
2151 2151 // and in fact we do not need to scan anything else. So, we simply
2152 2152 // set _finger to be limit to ensure that the bitmap iteration
2153 2153 // doesn't do anything.
2154 2154 _finger = limit;
2155 2155 }
2156 2156
2157 2157 _region_limit = limit;
2158 2158 }
2159 2159
2160 2160 void G1CMTask::giveup_current_region() {
2161 2161 assert(_curr_region != NULL, "invariant");
2162 2162 clear_region_fields();
2163 2163 }
2164 2164
2165 2165 void G1CMTask::clear_region_fields() {
2166 2166 // Values for these three fields that indicate that we're not
2167 2167 // holding on to a region.
2168 2168 _curr_region = NULL;
2169 2169 _finger = NULL;
2170 2170 _region_limit = NULL;
2171 2171 }
2172 2172
2173 2173 void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
2174 2174 if (cm_oop_closure == NULL) {
2175 2175 assert(_cm_oop_closure != NULL, "invariant");
2176 2176 } else {
2177 2177 assert(_cm_oop_closure == NULL, "invariant");
2178 2178 }
2179 2179 _cm_oop_closure = cm_oop_closure;
2180 2180 }
2181 2181
2182 2182 void G1CMTask::reset(G1CMBitMap* next_mark_bitmap) {
2183 2183 guarantee(next_mark_bitmap != NULL, "invariant");
2184 2184 _next_mark_bitmap = next_mark_bitmap;
2185 2185 clear_region_fields();
2186 2186
2187 2187 _calls = 0;
2188 2188 _elapsed_time_ms = 0.0;
2189 2189 _termination_time_ms = 0.0;
2190 2190 _termination_start_time_ms = 0.0;
2191 2191
2192 2192 _mark_stats_cache.reset();
2193 2193 }
2194 2194
2195 2195 bool G1CMTask::should_exit_termination() {
2196 2196 if (!regular_clock_call()) {
2197 2197 return true;
2198 2198 }
2199 2199
2200 2200 // This is called when we are in the termination protocol. We should
2201 2201 // quit if, for some reason, this task wants to abort or the global
2202 2202 // stack is not empty (this means that we can get work from it).
2203 2203 return !_cm->mark_stack_empty() || has_aborted();
2204 2204 }
2205 2205
2206 2206 void G1CMTask::reached_limit() {
2207 2207 assert(_words_scanned >= _words_scanned_limit ||
2208 2208 _refs_reached >= _refs_reached_limit,
2209 2209 "shouldn't have been called otherwise");
2210 2210 abort_marking_if_regular_check_fail();
2211 2211 }
2212 2212
2213 2213 bool G1CMTask::regular_clock_call() {
2214 2214 if (has_aborted()) {
2215 2215 return false;
2216 2216 }
2217 2217
2218 2218 // First, we need to recalculate the words scanned and refs reached
2219 2219 // limits for the next clock call.
2220 2220 recalculate_limits();
2221 2221
2222 2222 // During the regular clock call we do the following
2223 2223
2224 2224 // (1) If an overflow has been flagged, then we abort.
2225 2225 if (_cm->has_overflown()) {
2226 2226 return false;
2227 2227 }
2228 2228
2229 2229 // If we are not concurrent (i.e. we're doing remark) we don't need
2230 2230 // to check anything else. The other steps are only needed during
2231 2231 // the concurrent marking phase.
2232 2232 if (!_cm->concurrent()) {
2233 2233 return true;
2234 2234 }
2235 2235
2236 2236 // (2) If marking has been aborted for Full GC, then we also abort.
2237 2237 if (_cm->has_aborted()) {
2238 2238 return false;
2239 2239 }
2240 2240
2241 2241 double curr_time_ms = os::elapsedVTime() * 1000.0;
2242 2242
2243 2243 // (3) We check whether we should yield. If we have to, then we abort.
2244 2244 if (SuspendibleThreadSet::should_yield()) {
2245 2245 // We should yield. To do this we abort the task. The caller is
2246 2246 // responsible for yielding.
2247 2247 return false;
2248 2248 }
2249 2249
2250 2250 // (4) We check whether we've reached our time quota. If we have,
2251 2251 // then we abort.
2252 2252 double elapsed_time_ms = curr_time_ms - _start_time_ms;
2253 2253 if (elapsed_time_ms > _time_target_ms) {
2254 2254 _has_timed_out = true;
2255 2255 return false;
2256 2256 }
2257 2257
2258 2258 // (5) Finally, we check whether there are enough completed SATB
2259 2259 // buffers available for processing. If there are, we abort.
2260 2260 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2261 2261 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
2262 2262 // we do need to process SATB buffers, we'll abort and restart
2263 2263 // the marking task to do so
2264 2264 return false;
2265 2265 }
2266 2266 return true;
2267 2267 }
2268 2268
2269 2269 void G1CMTask::recalculate_limits() {
2270 2270 _real_words_scanned_limit = _words_scanned + words_scanned_period;
2271 2271 _words_scanned_limit = _real_words_scanned_limit;
2272 2272
2273 2273 _real_refs_reached_limit = _refs_reached + refs_reached_period;
2274 2274 _refs_reached_limit = _real_refs_reached_limit;
2275 2275 }
2276 2276
2277 2277 void G1CMTask::decrease_limits() {
2278 2278 // This is called when we believe that we're going to do an infrequent
2279 2279 // operation which will increase the per byte scanned cost (i.e. move
2280 2280 // entries to/from the global stack). It basically tries to decrease the
2281 2281 // scanning limit so that the clock is called earlier.
2282 2282
2283 2283 _words_scanned_limit = _real_words_scanned_limit - 3 * words_scanned_period / 4;
2284 2284 _refs_reached_limit = _real_refs_reached_limit - 3 * refs_reached_period / 4;
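  // For example, if words_scanned_period were 12k words, the limit above
  // drops by 9k, so at most a quarter of the usual scanning period
  // remains before the next clock call.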
2285 2285 }
2286 2286
2287 2287 void G1CMTask::move_entries_to_global_stack() {
2288 2288 // Local array where we'll store the entries that will be popped
2289 2289 // from the local queue.
2290 2290 G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2291 2291
2292 2292 size_t n = 0;
2293 2293 G1TaskQueueEntry task_entry;
2294 2294 while (n < G1CMMarkStack::EntriesPerChunk && _task_queue->pop_local(task_entry)) {
2295 2295 buffer[n] = task_entry;
2296 2296 ++n;
2297 2297 }
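  // If the chunk is not full, null-terminate it: the consumer
  // (get_entries_from_global_stack()) stops at the first null entry.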
2298 2298 if (n < G1CMMarkStack::EntriesPerChunk) {
2299 2299 buffer[n] = G1TaskQueueEntry();
2300 2300 }
2301 2301
2302 2302 if (n > 0) {
2303 2303 if (!_cm->mark_stack_push(buffer)) {
2304 2304 set_has_aborted();
2305 2305 }
2306 2306 }
2307 2307
2308 2308 // This operation was quite expensive, so decrease the limits.
2309 2309 decrease_limits();
2310 2310 }
2311 2311
2312 2312 bool G1CMTask::get_entries_from_global_stack() {
2313 2313 // Local array where we'll store the entries that will be popped
2314 2314 // from the global stack.
2315 2315 G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2316 2316
2317 2317 if (!_cm->mark_stack_pop(buffer)) {
2318 2318 return false;
2319 2319 }
2320 2320
2321 2321 // We did actually pop at least one entry.
2322 2322 for (size_t i = 0; i < G1CMMarkStack::EntriesPerChunk; ++i) {
2323 2323 G1TaskQueueEntry task_entry = buffer[i];
2324 2324 if (task_entry.is_null()) {
2325 2325 break;
2326 2326 }
2327 2327 assert(task_entry.is_array_slice() || oopDesc::is_oop(task_entry.obj()), "Element " PTR_FORMAT " must be an array slice or oop", p2i(task_entry.obj()));
2328 2328 bool success = _task_queue->push(task_entry);
2329 2329 // We only call this when the local queue is empty or under a
2330 2330 // given target limit. So, we do not expect this push to fail.
2331 2331 assert(success, "invariant");
2332 2332 }
2333 2333
2334 2334 // This operation was quite expensive, so decrease the limits
2335 2335 decrease_limits();
2336 2336 return true;
2337 2337 }
2338 2338
2339 2339 void G1CMTask::drain_local_queue(bool partially) {
2340 2340 if (has_aborted()) {
2341 2341 return;
2342 2342 }
2343 2343
2344 2344 // Decide what the target size is, depending on whether we're going to
2345 2345 // drain it partially (so that other tasks can steal if they run out
2346 2346 // of things to do) or totally (at the very end).
2347 2347 size_t target_size;
2348 2348 if (partially) {
2349 2349 target_size = MIN2((size_t)_task_queue->max_elems()/3, (size_t)GCDrainStackTargetSize);
2350 2350 } else {
2351 2351 target_size = 0;
2352 2352 }
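  // For instance, if GCDrainStackTargetSize were 64 and the queue much
  // larger, a partial drain stops once roughly 64 entries remain,
  // keeping work available for other tasks to steal.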
2353 2353
2354 2354 if (_task_queue->size() > target_size) {
2355 2355 G1TaskQueueEntry entry;
2356 2356 bool ret = _task_queue->pop_local(entry);
2357 2357 while (ret) {
2358 2358 scan_task_entry(entry);
2359 2359 if (_task_queue->size() <= target_size || has_aborted()) {
2360 2360 ret = false;
2361 2361 } else {
2362 2362 ret = _task_queue->pop_local(entry);
2363 2363 }
2364 2364 }
2365 2365 }
2366 2366 }
2367 2367
2368 2368 void G1CMTask::drain_global_stack(bool partially) {
2369 2369 if (has_aborted()) {
2370 2370 return;
2371 2371 }
2372 2372
2373 2373 // We have a policy to drain the local queue before we attempt to
2374 2374 // drain the global stack.
2375 2375 assert(partially || _task_queue->size() == 0, "invariant");
2376 2376
2377 2377 // Decide what the target size is, depending on whether we're going to
2378 2378 // drain it partially (so that other tasks can steal if they run out
2379 2379 // of things to do) or totally (at the very end).
2380 2380 // Notice that when draining the global mark stack partially, due to the raciness
2381 2381 // of the mark stack size update we might in fact drop below the target. But,
2382 2382 // this is not a problem.
2383 2383 // In case of total draining, we simply process until the global mark stack is
2384 2384 // totally empty, disregarding the size counter.
2385 2385 if (partially) {
2386 2386 size_t const target_size = _cm->partial_mark_stack_size_target();
2387 2387 while (!has_aborted() && _cm->mark_stack_size() > target_size) {
2388 2388 if (get_entries_from_global_stack()) {
2389 2389 drain_local_queue(partially);
2390 2390 }
2391 2391 }
2392 2392 } else {
2393 2393 while (!has_aborted() && get_entries_from_global_stack()) {
2394 2394 drain_local_queue(partially);
2395 2395 }
2396 2396 }
2397 2397 }
2398 2398
2399 2399 // The SATB queue makes several assumptions about whether to call the par or
2400 2400 // non-par versions of the methods. This is why some of the code is
2401 2401 // replicated. We should really get rid of the single-threaded version
2402 2402 // of the code to simplify things.
2403 2403 void G1CMTask::drain_satb_buffers() {
2404 2404 if (has_aborted()) {
2405 2405 return;
2406 2406 }
2407 2407
2408 2408 // We set this so that the regular clock knows that we're in the
2409 2409 // middle of draining buffers and doesn't set the abort flag when it
2410 2410 // notices that SATB buffers are available for draining. It'd be
2411 2411 // very counterproductive if it did that. :-)
2412 2412 _draining_satb_buffers = true;
2413 2413
2414 2414 G1CMSATBBufferClosure satb_cl(this, _g1h);
2415 2415 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2416 2416
2417 2417 // This keeps claiming and applying the closure to completed buffers
2418 2418 // until we run out of buffers or we need to abort.
2419 2419 while (!has_aborted() &&
2420 2420 satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
2421 2421 abort_marking_if_regular_check_fail();
2422 2422 }
2423 2423
2424 2424 // Can't assert qset is empty here, even if not aborted. If concurrent,
2425 2425 // some other thread might be adding to the queue. If not concurrent,
2426 2426 // some other thread might have won the race for the last buffer, but
2427 2427 // has not yet decremented the count.
2428 2428
2429 2429 _draining_satb_buffers = false;
2430 2430
2431 2431 // Again, this was a potentially expensive operation; decrease the
2432 2432 // limits to get the regular clock call early.
2433 2433 decrease_limits();
2434 2434 }
2435 2435
2436 2436 void G1CMTask::clear_mark_stats_cache(uint region_idx) {
2437 2437 _mark_stats_cache.reset(region_idx);
2438 2438 }
2439 2439
2440 2440 Pair<size_t, size_t> G1CMTask::flush_mark_stats_cache() {
2441 2441 return _mark_stats_cache.evict_all();
2442 2442 }
2443 2443
2444 2444 void G1CMTask::print_stats() {
2445 2445 log_debug(gc, stats)("Marking Stats, task = %u, calls = %u", _worker_id, _calls);
2446 2446 log_debug(gc, stats)(" Elapsed time = %1.2lfms, Termination time = %1.2lfms",
2447 2447 _elapsed_time_ms, _termination_time_ms);
2448 2448 log_debug(gc, stats)(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms max = %1.2lfms, total = %1.2lfms",
2449 2449 _step_times_ms.num(),
2450 2450 _step_times_ms.avg(),
2451 2451 _step_times_ms.sd(),
2452 2452 _step_times_ms.maximum(),
2453 2453 _step_times_ms.sum());
2454 2454 size_t const hits = _mark_stats_cache.hits();
2455 2455 size_t const misses = _mark_stats_cache.misses();
2456 2456 log_debug(gc, stats)(" Mark Stats Cache: hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %.3f",
2457 2457 hits, misses, percent_of(hits, hits + misses));
2458 2458 }
2459 2459
2460 2460 bool G1ConcurrentMark::try_stealing(uint worker_id, G1TaskQueueEntry& task_entry) {
2461 2461 return _task_queues->steal(worker_id, task_entry);
2462 2462 }
2463 2463
2464 2464 /*****************************************************************************
2465 2465
2466 2466 The do_marking_step(time_target_ms, ...) method is the building
2467 2467 block of the parallel marking framework. It can be called in parallel
2468 2468 with other invocations of do_marking_step() on different tasks
2469 2469 (but only one per task, obviously) and concurrently with the
2470 2470 mutator threads, or during remark, hence it eliminates the need
2471 2471 for two versions of the code. When called during remark, it will
2472 2472 pick up from where the task left off during the concurrent marking
2473 2473 phase. Interestingly, tasks are also claimable during evacuation
2474 2474 pauses, since do_marking_step() ensures that it aborts before
2475 2475 it needs to yield.
2476 2476
2477 2477 The data structures that it uses to do marking work are the
2478 2478 following:
2479 2479
2480 2480 (1) Marking Bitmap. If there are gray objects that appear only
2481 2481 on the bitmap (this happens either when dealing with an overflow
2482 2482 or when the initial marking phase has simply marked the roots
2483 2483 and didn't push them on the stack), then tasks claim heap
2484 2484 regions whose bitmap they then scan to find gray objects. A
2485 2485 global finger indicates where the end of the last claimed region
2486 2486 is. A local finger indicates how far into the region a task has
2487 2487 scanned. The two fingers are used to determine how to gray an
2488 2488 object (i.e. whether simply marking it is OK, as it will be
2489 2489 visited by a task in the future, or whether it needs to be also
2490 2490 pushed on a stack).
2491 2491
2492 2492 (2) Local Queue. The local queue of the task which is accessed
2493 2493 reasonably efficiently by the task. Other tasks can steal from
2494 2494 it when they run out of work. Throughout the marking phase, a
2495 2495 task attempts to keep its local queue short but not totally
2496 2496 empty, so that entries are available for stealing by other
2497 2497 tasks. Only when there is no more work will a task totally
2498 2498 drain its local queue.
2499 2499
2500 2500 (3) Global Mark Stack. This handles local queue overflow. During
2501 2501 marking only sets of entries are moved between it and the local
2502 2502 queues, as access to it requires a mutex and more fine-grain
2503 2503 interaction with it which might cause contention. If it
2504 2504 overflows, then the marking phase should restart and iterate
2505 2505 over the bitmap to identify gray objects. Throughout the marking
2506 2506 phase, tasks attempt to keep the global mark stack at a small
2507 2507 length but not totally empty, so that entries are available for
2508 2508 popping by other tasks. Only when there is no more work will tasks
2509 2509 will totally drain the global mark stack.
2510 2510
2511 2511 (4) SATB Buffer Queue. This is where completed SATB buffers are
2512 2512 made available. Buffers are regularly removed from this queue
2513 2513 and scanned for roots, so that the queue doesn't get too
2514 2514 long. During remark, all completed buffers are processed, as
2515 2515 well as the filled in parts of any uncompleted buffers.
2516 2516
2517 2517 The do_marking_step() method tries to abort when the time target
2518 2518 has been reached. There are a few other cases when the
2519 2519 do_marking_step() method also aborts:
2520 2520
2521 2521 (1) When the marking phase has been aborted (after a Full GC).
2522 2522
2523 2523 (2) When a global overflow (on the global stack) has been
2524 2524 triggered. Before the task aborts, it will actually sync up with
2525 2525 the other tasks to ensure that all the marking data structures
2526 2526 (local queues, stacks, fingers etc.) are re-initialized so that
2527 2527 when do_marking_step() completes, the marking phase can
2528 2528 immediately restart.
2529 2529
2530 2530 (3) When enough completed SATB buffers are available. The
2531 2531 do_marking_step() method only tries to drain SATB buffers right
2532 2532 at the beginning. So, if enough buffers are available, the
2533 2533 marking step aborts and the SATB buffers are processed at
2534 2534 the beginning of the next invocation.
2535 2535
2536 2536 (4) To yield. When we have to yield, we abort and yield
2537 2537 right at the end of do_marking_step(). This saves us a lot
2538 2538 of hassle as, by yielding, we might allow a Full GC. If this
2539 2539 happens, then objects will be compacted underneath our feet, the
2540 2540 heap might shrink, etc. We avoid checking for this by just
2541 2541 aborting and doing the yield right at the end.
2542 2542
2543 2543 From the above it follows that the do_marking_step() method should
2544 2544 be called in a loop (or, otherwise, regularly) until it completes.
2545 2545
2546 2546 If a marking step completes without its has_aborted() flag being
2547 2547 true, it means it has completed the current marking phase (and
2548 2548 also all other marking tasks have done so and have all synced up).
2549 2549
2550 2550 A method called regular_clock_call() is invoked "regularly" (in
2551 2551 sub-ms intervals) throughout marking. It is this clock method that
2552 2552 checks all the abort conditions which were mentioned above and
2553 2553 decides when the task should abort. A work-based scheme is used to
2554 2554 trigger this clock method: when the number of object words the
2555 2555 marking phase has scanned or the number of references the marking
2556 2556 phase has visited reach a given limit. Additional invocations of
2557 2557 the clock method have been planted in a few other strategic places
2558 2558 too. The initial reason for the clock method was to avoid calling
2559 2559 vtime too regularly, as it is quite expensive. So, once it was in
2560 2560 place, it was natural to piggy-back all the other conditions on it
2561 2561 too and not constantly check them throughout the code.
2562 2562
2563 2563 If do_termination is true then do_marking_step will enter its
2564 2564 termination protocol.
2565 2565
2566 2566 The value of is_serial must be true when do_marking_step is being
2567 2567 called serially (i.e. by the VMThread) and do_marking_step should
2568 2568 skip any synchronization in the termination and overflow code.
2569 2569 Examples include the serial remark code and the serial reference
2570 2570 processing closures.
2571 2571
2572 2572 The value of is_serial must be false when do_marking_step is
2573 2573 being called by any of the worker threads in a work gang.
2574 2574 Examples include the concurrent marking code (CMMarkingTask),
2575 2575 the MT remark code, and the MT reference processing closures.
2576 2576
2577 2577 *****************************************************************************/
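// A typical driver loop, matching the remark task and the reference
// processing closures earlier in this file (a sketch; task is a
// G1CMTask* and cm the owning G1ConcurrentMark*):
//
//   do {
//     task->do_marking_step(1000000000.0 /* something very large */,
//                           true /* do_termination */,
//                           false /* is_serial */);
//   } while (task->has_aborted() && !cm->has_overflown());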
2578 2578
2579 2579 void G1CMTask::do_marking_step(double time_target_ms,
2580 2580 bool do_termination,
2581 2581 bool is_serial) {
2582 2582 assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
2583 2583
2584 2584 _start_time_ms = os::elapsedVTime() * 1000.0;
2585 2585
2586 2586 // If do_stealing is true then do_marking_step will attempt to
2587 2587 // steal work from the other G1CMTasks. It only makes sense to
2588 2588 // enable stealing when the termination protocol is enabled
2589 2589 // and do_marking_step() is not being called serially.
2590 2590 bool do_stealing = do_termination && !is_serial;
2591 2591
2592 2592 G1Predictions const& predictor = _g1h->policy()->predictor();
2593 2593 double diff_prediction_ms = predictor.predict_zero_bounded(&_marking_step_diff_ms);
2594 2594 _time_target_ms = time_target_ms - diff_prediction_ms;
2595 2595
2596 2596 // set up the variables that are used in the work-based scheme to
2597 2597 // call the regular clock method
2598 2598 _words_scanned = 0;
2599 2599 _refs_reached = 0;
2600 2600 recalculate_limits();
2601 2601
2602 2602 // clear all flags
2603 2603 clear_has_aborted();
2604 2604 _has_timed_out = false;
2605 2605 _draining_satb_buffers = false;
2606 2606
2607 2607 ++_calls;
2608 2608
2609 2609 // Set up the bitmap and oop closures. Anything that uses them is
2610 2610 // eventually called from this method, so it is OK to allocate these
2611 2611 // on the stack.
2612 2612 G1CMBitMapClosure bitmap_closure(this, _cm);
2613 2613 G1CMOopClosure cm_oop_closure(_g1h, this);
2614 2614 set_cm_oop_closure(&cm_oop_closure);
2615 2615
2616 2616 if (_cm->has_overflown()) {
2617 2617 // This can happen if the mark stack overflows during a GC pause
2618 2618 // and this task, after a yield point, restarts. We have to abort
2619 2619 // as we need to get into the overflow protocol which happens
2620 2620 // right at the end of this task.
2621 2621 set_has_aborted();
2622 2622 }
2623 2623
2624 2624 // First drain any available SATB buffers. After this, we will not
2625 2625 // look at SATB buffers before the next invocation of this method.
2626 2626 // If enough completed SATB buffers are queued up, the regular clock
2627 2627 // will abort this task so that it restarts.
2628 2628 drain_satb_buffers();
2629 2629 // ...then partially drain the local queue and the global stack
2630 2630 drain_local_queue(true);
2631 2631 drain_global_stack(true);
2632 2632
2633 2633 do {
2634 2634 if (!has_aborted() && _curr_region != NULL) {
2635 2635 // This means that we're already holding on to a region.
2636 2636 assert(_finger != NULL, "if region is not NULL, then the finger "
2637 2637 "should not be NULL either");
2638 2638
2639 2639 // We might have restarted this task after an evacuation pause
2640 2640 // which might have evacuated the region we're holding on to
2641 2641 // underneath our feet. Let's read its limit again to make sure
2642 2642 // that we do not iterate over a region of the heap that
2643 2643 // contains garbage (update_region_limit() will also move
2644 2644 // _finger to the start of the region if it is found empty).
2645 2645 update_region_limit();
2646 2646 // We will start from _finger not from the start of the region,
2647 2647 // as we might be restarting this task after aborting half-way
2648 2648 // through scanning this region. In this case, _finger points to
2649 2649 // the address where we last found a marked object. If this is a
2650 2650 // fresh region, _finger points to start().
2651 2651 MemRegion mr = MemRegion(_finger, _region_limit);
2652 2652
2653 2653 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
2654 2654 "humongous regions should go around loop once only");
2655 2655
2656 2656 // Some special cases:
2657 2657 // If the memory region is empty, we can just give up the region.
2658 2658 // If the current region is humongous then we only need to check
2659 2659 // the bitmap for the bit associated with the start of the object,
2660 2660 // scan the object if it's live, and give up the region.
2661 2661 // Otherwise, let's iterate over the bitmap of the part of the region
2662 2662 // that is left.
2663 2663 // If the iteration is successful, give up the region.
2664 2664 if (mr.is_empty()) {
2665 2665 giveup_current_region();
2666 2666 abort_marking_if_regular_check_fail();
2667 2667 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
2668 2668 if (_next_mark_bitmap->is_marked(mr.start())) {
2669 2669 // The object is marked - apply the closure
2670 2670 bitmap_closure.do_addr(mr.start());
2671 2671 }
2672 2672 // Even if this task aborted while scanning the humongous object
2673 2673 // we can (and should) give up the current region.
2674 2674 giveup_current_region();
2675 2675 abort_marking_if_regular_check_fail();
2676 2676 } else if (_next_mark_bitmap->iterate(&bitmap_closure, mr)) {
2677 2677 giveup_current_region();
2678 2678 abort_marking_if_regular_check_fail();
2679 2679 } else {
2680 2680 assert(has_aborted(), "currently the only way to do so");
2681 2681 // The only way to abort the bitmap iteration is to return
2682 2682 // false from the do_addr() method. However, inside the
2683 2683 // do_addr() method we move the _finger to point to the
2684 2684 // object currently being looked at. So, if we bail out, we
2685 2685 // have definitely set _finger to something non-null.
2686 2686 assert(_finger != NULL, "invariant");
2687 2687
2688 2688 // Region iteration was actually aborted. So now _finger
2689 2689 // points to the address of the object we last scanned. If we
2690 2690 // leave it there, when we restart this task, we will rescan
2691 2691 // the object. It is easy to avoid this. We move the finger by
2692 2692 // enough to point to the next possible object header.
2693 2693 assert(_finger < _region_limit, "invariant");
2694 2694 HeapWord* const new_finger = _finger + ((oop)_finger)->size();
2695 2695 // Check if bitmap iteration was aborted while scanning the last object
2696 2696 if (new_finger >= _region_limit) {
2697 2697 giveup_current_region();
2698 2698 } else {
2699 2699 move_finger_to(new_finger);
2700 2700 }
2701 2701 }
2702 2702 }
2703 2703 // At this point we have either completed iterating over the
2704 2704 // region we were holding on to, or we have aborted.
2705 2705
2706 2706 // We then partially drain the local queue and the global stack.
2707 2707 // (Do we really need this?)
2708 2708 drain_local_queue(true);
2709 2709 drain_global_stack(true);
2710 2710
2711 2711 // Read the note on the claim_region() method on why it might
2712 2712 // return NULL with potentially more regions available for
2713 2713 // claiming and why we have to check out_of_regions() to determine
2714 2714 // whether we're done or not.
2715 2715 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
2716 2716 // We are going to try to claim a new region. We should have
2717 2717 // given up on the previous one.
2718 2718 // Separated the asserts so that we know which one fires.
2719 2719 assert(_curr_region == NULL, "invariant");
2720 2720 assert(_finger == NULL, "invariant");
2721 2721 assert(_region_limit == NULL, "invariant");
2722 2722 HeapRegion* claimed_region = _cm->claim_region(_worker_id);
2723 2723 if (claimed_region != NULL) {
2724 2724 // Yes, we managed to claim one
2725 2725 setup_for_region(claimed_region);
2726 2726 assert(_curr_region == claimed_region, "invariant");
2727 2727 }
2728 2728 // It is important to call the regular clock here. It might take
2729 2729 // a while to claim a region if, for example, we hit a large
2730 2730 // block of empty regions. So we need to call the regular clock
2731 2731 // method once round the loop to make sure it's called
2732 2732 // frequently enough.
2733 2733 abort_marking_if_regular_check_fail();
2734 2734 }
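// Claiming works by advancing a global finger with a CAS, which is why
// claim_region() can return NULL while unclaimed regions remain: another
// worker may have moved the finger first. A simplified sketch (the real
// method also deals with humongous regions and concurrent heap changes,
// and the Atomic::cmpxchg argument order shown here is illustrative):
//
//   HeapRegion* claim_region(uint worker_id) {
//     while (_finger < _heap_end) {
//       HeapWord* finger = _finger;
//       HeapRegion* curr_region = _g1h->heap_region_containing(finger);
//       HeapWord* end = curr_region->end();
//       // Try to move the global finger past this region.
//       if (Atomic::cmpxchg(&_finger, finger, end) == finger) {
//         return curr_region;  // We claimed it.
//       }
//       // Lost the race; re-read the finger and retry.
//     }
//     return NULL;
//   }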
2735 2735
2736 2736 if (!has_aborted() && _curr_region == NULL) {
2737 2737 assert(_cm->out_of_regions(),
2738 2738 "at this point we should be out of regions");
2739 2739 }
2740 2740 } while (_curr_region != NULL && !has_aborted());
2741 2741
2742 2742 if (!has_aborted()) {
2743 2743 // We cannot check whether the global stack is empty, since other
2744 2744 // tasks might be pushing objects to it concurrently.
2745 2745 assert(_cm->out_of_regions(),
2746 2746 "at this point we should be out of regions");
2747 2747 // Try to reduce the number of available SATB buffers so that
2748 2748 // remark has less work to do.
2749 2749 drain_satb_buffers();
2750 2750 }
2751 2751
2752 2752 // Since we've done everything else, we can now totally drain the
2753 2753 // local queue and global stack.
2754 2754 drain_local_queue(false);
2755 2755 drain_global_stack(false);
2756 2756
2757 2757 // Attempt at work stealing from other tasks' queues.
2758 2758 if (do_stealing && !has_aborted()) {
2759 2759 // We have not aborted. This means that we have finished all that
2760 2760 // we could. Let's try to do some stealing...
2761 2761
2762 2762 // We cannot check whether the global stack is empty, since other
2763 2763 // tasks might be pushing objects to it concurrently.
2764 2764 assert(_cm->out_of_regions() && _task_queue->size() == 0,
2765 2765 "only way to reach here");
2766 2766 while (!has_aborted()) {
2767 2767 G1TaskQueueEntry entry;
2768 2768 if (_cm->try_stealing(_worker_id, entry)) {
2769 2769 scan_task_entry(entry);
2770 2770
2771 2771 // And since we're towards the end, let's totally drain the
2772 2772 // local queue and global stack.
2773 2773 drain_local_queue(false);
2774 2774 drain_global_stack(false);
2775 2775 } else {
2776 2776 break;
2777 2777 }
2778 2778 }
2779 2779 }
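// try_stealing() is plausibly a thin wrapper over the shared task queue
// set's steal operation; a hedged sketch of that shape:
//
//   bool G1ConcurrentMark::try_stealing(uint worker_id, G1TaskQueueEntry& entry) {
//     return _task_queues->steal(worker_id, entry);
//   }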
2780 2780
2781 2781 // We still haven't aborted. Now, let's try to get into the
2782 2782 // termination protocol.
2783 2783 if (do_termination && !has_aborted()) {
2784 2784 // We cannot check whether the global stack is empty, since other
2785 2785 // tasks might be concurrently pushing objects on it.
2786 2786 // Separated the asserts so that we know which one fires.
2787 2787 assert(_cm->out_of_regions(), "only way to reach here");
2788 2788 assert(_task_queue->size() == 0, "only way to reach here");
2789 2789 _termination_start_time_ms = os::elapsedVTime() * 1000.0;
2790 2790
2791 2791 // The G1CMTask class also extends the TerminatorTerminator class,
2792 2792 // hence its should_exit_termination() method will also decide
2793 2793 // whether to exit the termination protocol or not.
2794 2794 bool finished = (is_serial ||
2795 2795 _cm->terminator()->offer_termination(this));
2796 2796 double termination_end_time_ms = os::elapsedVTime() * 1000.0;
2797 2797 _termination_time_ms +=
2798 2798 termination_end_time_ms - _termination_start_time_ms;
2799 2799
2800 2800 if (finished) {
2801 2801 // We're all done.
2802 2802
2803 2803 // We can now guarantee that the global stack is empty, since
2804 2804 // all other tasks have finished. We separated the guarantees so
2805 2805 // that, if a condition is false, we can immediately find out
2806 2806 // which one.
2807 2807 guarantee(_cm->out_of_regions(), "only way to reach here");
2808 2808 guarantee(_cm->mark_stack_empty(), "only way to reach here");
2809 2809 guarantee(_task_queue->size() == 0, "only way to reach here");
2810 2810 guarantee(!_cm->has_overflown(), "only way to reach here");
2811 2811 guarantee(!has_aborted(), "should never happen if termination has completed");
2812 2812 } else {
2813 2813 // Apparently there's more work to do. Let's abort this task. The
2814 2814 // caller will restart it and we can hopefully find more things to do.
2815 2815 set_has_aborted();
2816 2816 }
2817 2817 }
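// offer_termination() makes each worker spin (or yield) until every
// worker has offered to terminate, while should_exit_termination() lets
// a task withdraw its offer early, e.g. when new SATB buffers appear.
// A schematic sketch, not the real TaskTerminator:
//
//   bool offer_termination(TerminatorTerminator* t) {
//     Atomic::inc(&_offered);
//     while (Atomic::load(&_offered) < _n_workers) {
//       if (t != NULL && t->should_exit_termination()) {
//         Atomic::dec(&_offered);  // Withdraw the offer; back to work.
//         return false;
//       }
//       os::naked_yield();
//     }
//     return true;  // Everyone has offered: marking is done.
//   }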
2818 2818
2819 2819 // Mainly for debugging purposes to make sure that a pointer to the
2820 2820 // closure which was statically allocated in this frame doesn't
2821 2821 // escape it by accident.
2822 2822 set_cm_oop_closure(NULL);
2823 2823 double end_time_ms = os::elapsedVTime() * 1000.0;
2824 2824 double elapsed_time_ms = end_time_ms - _start_time_ms;
2825 2825 // Update the step history.
2826 2826 _step_times_ms.add(elapsed_time_ms);
2827 2827
2828 2828 if (has_aborted()) {
2829 2829 // The task was aborted for some reason.
2830 2830 if (_has_timed_out) {
2831 2831 double diff_ms = elapsed_time_ms - _time_target_ms;
2832 2832 // Keep statistics of how well we did with respect to hitting
2833 2833 // our target only if we actually timed out (if we aborted for
2834 2834 // other reasons, then the results might get skewed).
2835 2835 _marking_step_diff_ms.add(diff_ms);
2836 2836 }
2837 2837
2838 2838 if (_cm->has_overflown()) {
2839 2839 // This is the interesting one. We aborted because a global
2840 2840 // overflow was raised. This means we have to restart the
2841 2841 // marking phase and start iterating over regions. However, in
2842 2842 // order to do this we have to make sure that all tasks stop
2843 2843 // what they are doing and re-initialize in a safe manner. We
2844 2844 // will achieve this with the use of two barrier sync points.
2845 2845
2846 2846 if (!is_serial) {
2847 2847 // We only need to enter the sync barrier if being called
2848 2848 // from a parallel context
2849 2849 _cm->enter_first_sync_barrier(_worker_id);
2850 2850
2851 2851 // When we exit this sync barrier we know that all tasks have
2852 2852 // stopped doing marking work. So, it's now safe to
2853 2853 // re-initialize our data structures.
2854 2854 }
2855 2855
2856 2856 clear_region_fields();
2857 2857 flush_mark_stats_cache();
2858 2858
2859 2859 if (!is_serial) {
2860 2860 // If we're executing the concurrent phase of marking, reset the marking
2861 2861 // state; otherwise the marking state is reset after reference processing,
2862 2862 // during the remark pause.
2863 2863 // If we reset here as a result of an overflow during the remark we will
2864 2864 // see assertion failures from any subsequent set_concurrency_and_phase()
2865 2865 // calls.
2866 2866 if (_cm->concurrent() && _worker_id == 0) {
2867 2867 // Worker 0 is responsible for clearing the global data structures because
2868 2868 // of an overflow. During STW we should not clear the overflow flag (in
2869 2869 // G1ConcurrentMark::reset_marking_state()) since we rely on it being true when we exit
2870 2870 // this method to abort the pause and restart concurrent marking.
2871 2871 _cm->reset_marking_for_restart();
2872 2872
2873 2873 log_info(gc, marking)("Concurrent Mark reset for overflow");
2874 2874 }
2875 2875
2876 2876 // ...and enter the second barrier.
2877 2877 _cm->enter_second_sync_barrier(_worker_id);
2878 2878 }
2879 2879 // At this point, if we're during the concurrent phase of
2880 2880 // marking, everything has been re-initialized and we're
2881 2881 // ready to restart.
2882 2882 }
2883 2883 }
2884 2884 }
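// do_marking_step() is meant to be called repeatedly: on abort the
// caller simply re-invokes it until the step runs to completion. A
// sketch of a concurrent-phase driver (hedged; the real loop lives in
// the concurrent marking worker task and also yields for safepoints):
//
//   do {
//     task->do_marking_step(G1ConcMarkStepDurationMillis,
//                           true  /* do_termination */,
//                           false /* is_serial */);
//     _cm->do_yield_check();
//   } while (!_cm->has_aborted() && task->has_aborted());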
2885 2885
2886 2886 G1CMTask::G1CMTask(uint worker_id,
2887 2887 G1ConcurrentMark* cm,
2888 2888 G1CMTaskQueue* task_queue,
2889 2889 G1RegionMarkStats* mark_stats,
2890 2890 uint max_regions) :
2891 2891 _objArray_processor(this),
2892 2892 _worker_id(worker_id),
2893 2893 _g1h(G1CollectedHeap::heap()),
2894 2894 _cm(cm),
2895 2895 _next_mark_bitmap(NULL),
2896 2896 _task_queue(task_queue),
2897 2897 _mark_stats_cache(mark_stats, max_regions, RegionMarkStatsCacheSize),
2898 2898 _calls(0),
2899 2899 _time_target_ms(0.0),
2900 2900 _start_time_ms(0.0),
2901 2901 _cm_oop_closure(NULL),
2902 2902 _curr_region(NULL),
2903 2903 _finger(NULL),
2904 2904 _region_limit(NULL),
2905 2905 _words_scanned(0),
2906 2906 _words_scanned_limit(0),
2907 2907 _real_words_scanned_limit(0),
2908 2908 _refs_reached(0),
2909 2909 _refs_reached_limit(0),
2910 2910 _real_refs_reached_limit(0),
2911 2911 _has_aborted(false),
2912 2912 _has_timed_out(false),
2913 2913 _draining_satb_buffers(false),
2914 2914 _step_times_ms(),
2915 2915 _elapsed_time_ms(0.0),
2916 2916 _termination_time_ms(0.0),
2917 2917 _termination_start_time_ms(0.0),
2918 2918 _marking_step_diff_ms()
2919 2919 {
2920 2920 guarantee(task_queue != NULL, "invariant");
2921 2921
2922 2922 _marking_step_diff_ms.add(0.5);
2923 2923 }
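// Seeding _marking_step_diff_ms with 0.5 gives the predictor a non-empty
// history, so even the very first marking step budgets some expected
// overshoot. With only the seed in the sequence (illustrative numbers):
//
//   _time_target_ms = time_target_ms - predict_zero_bounded(&seq)
//                   ~= 10.0 - 0.5 = 9.5 ms  (for a 10 ms step target)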
2924 2924
2925 2925 // These are formatting macros that are used below to ensure
2926 2926 // consistent formatting. The *_H_* versions are used to format the
2927 2927 // header for a particular value and they should be kept consistent
2928 2928 // with the corresponding macro. Also note that most of the macros add
2929 2929 // the necessary white space (as a prefix) which makes them a bit
2930 2930 // easier to compose.
2931 2931
2932 2932 // All the output lines are prefixed with this string to be able to
2933 2933 // identify them easily in a large log file.
2934 2934 #define G1PPRL_LINE_PREFIX "###"
2935 2935
2936 2936 #define G1PPRL_ADDR_BASE_FORMAT " " PTR_FORMAT "-" PTR_FORMAT
2937 2937 #ifdef _LP64
2938 2938 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s"
2939 2939 #else // _LP64
2940 2940 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s"
2941 2941 #endif // _LP64
2942 2942
2943 2943 // For per-region info
2944 2944 #define G1PPRL_TYPE_FORMAT " %-4s"
2945 2945 #define G1PPRL_TYPE_H_FORMAT " %4s"
2946 2946 #define G1PPRL_STATE_FORMAT " %-5s"
2947 2947 #define G1PPRL_STATE_H_FORMAT " %5s"
2948 2948 #define G1PPRL_BYTE_FORMAT " " SIZE_FORMAT_W(9)
2949 2949 #define G1PPRL_BYTE_H_FORMAT " %9s"
2950 2950 #define G1PPRL_DOUBLE_FORMAT " %14.1f"
2951 2951 #define G1PPRL_DOUBLE_H_FORMAT " %14s"
2952 2952
2953 2953 // For summary info
2954 2954 #define G1PPRL_SUM_ADDR_FORMAT(tag) " " tag ":" G1PPRL_ADDR_BASE_FORMAT
2955 2955 #define G1PPRL_SUM_BYTE_FORMAT(tag) " " tag ": " SIZE_FORMAT
2956 2956 #define G1PPRL_SUM_MB_FORMAT(tag) " " tag ": %1.2f MB"
2957 2957 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"
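// Adjacent string literals are concatenated at compile time, so these
// macros compose into a single format string. For example (LP64 shown):
//
//   G1PPRL_LINE_PREFIX G1PPRL_TYPE_FORMAT G1PPRL_BYTE_FORMAT
//
// becomes
//
//   "###" " %-4s" " " SIZE_FORMAT_W(9)
//
// i.e. one literal that the log_trace() calls below can use directly.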
2958 2958
2959 2959 G1PrintRegionLivenessInfoClosure::G1PrintRegionLivenessInfoClosure(const char* phase_name) :
2960 2960 _total_used_bytes(0), _total_capacity_bytes(0),
2961 2961 _total_prev_live_bytes(0), _total_next_live_bytes(0),
2962 2962 _total_remset_bytes(0), _total_strong_code_roots_bytes(0)
2963 2963 {
2964 2964 if (!log_is_enabled(Trace, gc, liveness)) {
2965 2965 return;
2966 2966 }
2967 2967
2968 2968 G1CollectedHeap* g1h = G1CollectedHeap::heap();
2969 2969 MemRegion g1_reserved = g1h->g1_reserved();
2970 2970 double now = os::elapsedTime();
2971 2971
2972 2972 // Print the header of the output.
2973 2973 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
2974 2974 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP"
2975 2975 G1PPRL_SUM_ADDR_FORMAT("reserved")
2976 2976 G1PPRL_SUM_BYTE_FORMAT("region-size"),
2977 2977 p2i(g1_reserved.start()), p2i(g1_reserved.end()),
2978 2978 HeapRegion::GrainBytes);
2979 2979 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
2980 2980 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
2981 2981 G1PPRL_TYPE_H_FORMAT
2982 2982 G1PPRL_ADDR_BASE_H_FORMAT
2983 2983 G1PPRL_BYTE_H_FORMAT
2984 2984 G1PPRL_BYTE_H_FORMAT
2985 2985 G1PPRL_BYTE_H_FORMAT
2986 2986 G1PPRL_DOUBLE_H_FORMAT
2987 2987 G1PPRL_BYTE_H_FORMAT
2988 2988 G1PPRL_STATE_H_FORMAT
2989 2989 G1PPRL_BYTE_H_FORMAT,
2990 2990 "type", "address-range",
2991 2991 "used", "prev-live", "next-live", "gc-eff",
2992 2992 "remset", "state", "code-roots");
2993 2993 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
2994 2994 G1PPRL_TYPE_H_FORMAT
2995 2995 G1PPRL_ADDR_BASE_H_FORMAT
2996 2996 G1PPRL_BYTE_H_FORMAT
2997 2997 G1PPRL_BYTE_H_FORMAT
2998 2998 G1PPRL_BYTE_H_FORMAT
2999 2999 G1PPRL_DOUBLE_H_FORMAT
3000 3000 G1PPRL_BYTE_H_FORMAT
3001 3001 G1PPRL_STATE_H_FORMAT
3002 3002 G1PPRL_BYTE_H_FORMAT,
3003 3003 "", "",
3004 3004 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
3005 3005 "(bytes)", "", "(bytes)");
3006 3006 }
3007 3007
3008 3008 bool G1PrintRegionLivenessInfoClosure::do_heap_region(HeapRegion* r) {
3009 3009 if (!log_is_enabled(Trace, gc, liveness)) {
3010 3010 return false;
3011 3011 }
3012 3012
3013 3013 const char* type = r->get_type_str();
3014 3014 HeapWord* bottom = r->bottom();
3015 3015 HeapWord* end = r->end();
3016 3016 size_t capacity_bytes = r->capacity();
3017 3017 size_t used_bytes = r->used();
3018 3018 size_t prev_live_bytes = r->live_bytes();
3019 3019 size_t next_live_bytes = r->next_live_bytes();
3020 3020 double gc_eff = r->gc_efficiency();
3021 3021 size_t remset_bytes = r->rem_set()->mem_size();
3022 3022 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
3023 3023 const char* remset_type = r->rem_set()->get_short_state_str();
3024 3024
3025 3025 _total_used_bytes += used_bytes;
3026 3026 _total_capacity_bytes += capacity_bytes;
3027 3027 _total_prev_live_bytes += prev_live_bytes;
3028 3028 _total_next_live_bytes += next_live_bytes;
3029 3029 _total_remset_bytes += remset_bytes;
3030 3030 _total_strong_code_roots_bytes += strong_code_roots_bytes;
3031 3031
3032 3032 // Print a line for this particular region.
3033 3033 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3034 3034 G1PPRL_TYPE_FORMAT
3035 3035 G1PPRL_ADDR_BASE_FORMAT
3036 3036 G1PPRL_BYTE_FORMAT
3037 3037 G1PPRL_BYTE_FORMAT
3038 3038 G1PPRL_BYTE_FORMAT
3039 3039 G1PPRL_DOUBLE_FORMAT
3040 3040 G1PPRL_BYTE_FORMAT
3041 3041 G1PPRL_STATE_FORMAT
3042 3042 G1PPRL_BYTE_FORMAT,
3043 3043 type, p2i(bottom), p2i(end),
3044 3044 used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
3045 3045 remset_bytes, remset_type, strong_code_roots_bytes);
3046 3046
3047 3047 return false;
3048 3048 }
3049 3049
3050 3050 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
3051 3051 if (!log_is_enabled(Trace, gc, liveness)) {
3052 3052 return;
3053 3053 }
3054 3054
3055 3055 // Add static memory usage to the remembered set sizes.
3056 3056 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
3057 3057 // Print the footer of the output.
3058 3058 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
3059 3059 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3060 3060 " SUMMARY"
3061 3061 G1PPRL_SUM_MB_FORMAT("capacity")
3062 3062 G1PPRL_SUM_MB_PERC_FORMAT("used")
3063 3063 G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
3064 3064 G1PPRL_SUM_MB_PERC_FORMAT("next-live")
3065 3065 G1PPRL_SUM_MB_FORMAT("remset")
3066 3066 G1PPRL_SUM_MB_FORMAT("code-roots"),
3067 3067 bytes_to_mb(_total_capacity_bytes),
3068 3068 bytes_to_mb(_total_used_bytes),
3069 3069 percent_of(_total_used_bytes, _total_capacity_bytes),
3070 3070 bytes_to_mb(_total_prev_live_bytes),
3071 3071 percent_of(_total_prev_live_bytes, _total_capacity_bytes),
3072 3072 bytes_to_mb(_total_next_live_bytes),
3073 3073 percent_of(_total_next_live_bytes, _total_capacity_bytes),
3074 3074 bytes_to_mb(_total_remset_bytes),
3075 3075 bytes_to_mb(_total_strong_code_roots_bytes));
3076 3076 }
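// The summary line converts the byte totals with bytes_to_mb() and
// percent_of(). A worked example with made-up totals:
//
//   capacity = 2048 MB, used = 512 MB
//   bytes_to_mb(_total_used_bytes)                       == 512.00
//   percent_of(_total_used_bytes, _total_capacity_bytes) == 25.00  // "25.00 %"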
1854 lines elided