G1: Use SoftMaxHeapSize to guide GC heuristics
--- old/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
+++ new/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
1 1 /*
2 2 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "classfile/classLoaderDataGraph.hpp"
27 27 #include "code/codeCache.hpp"
28 28 #include "gc/g1/g1BarrierSet.hpp"
29 29 #include "gc/g1/g1CollectedHeap.inline.hpp"
30 30 #include "gc/g1/g1CollectorState.hpp"
31 31 #include "gc/g1/g1ConcurrentMark.inline.hpp"
32 32 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
33 33 #include "gc/g1/g1DirtyCardQueue.hpp"
34 34 #include "gc/g1/g1HeapVerifier.hpp"
35 35 #include "gc/g1/g1OopClosures.inline.hpp"
36 36 #include "gc/g1/g1Policy.hpp"
37 37 #include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
38 38 #include "gc/g1/g1StringDedup.hpp"
39 39 #include "gc/g1/g1ThreadLocalData.hpp"
40 40 #include "gc/g1/g1Trace.hpp"
41 41 #include "gc/g1/heapRegion.inline.hpp"
42 42 #include "gc/g1/heapRegionRemSet.hpp"
43 43 #include "gc/g1/heapRegionSet.inline.hpp"
44 44 #include "gc/shared/gcId.hpp"
45 45 #include "gc/shared/gcTimer.hpp"
46 46 #include "gc/shared/gcTraceTime.inline.hpp"
47 47 #include "gc/shared/gcVMOperations.hpp"
48 48 #include "gc/shared/genOopClosures.inline.hpp"
49 49 #include "gc/shared/referencePolicy.hpp"
50 50 #include "gc/shared/strongRootsScope.hpp"
51 51 #include "gc/shared/suspendibleThreadSet.hpp"
52 52 #include "gc/shared/taskqueue.inline.hpp"
53 53 #include "gc/shared/weakProcessor.inline.hpp"
54 54 #include "gc/shared/workerPolicy.hpp"
55 55 #include "include/jvm.h"
56 56 #include "logging/log.hpp"
57 57 #include "memory/allocation.hpp"
58 58 #include "memory/iterator.hpp"
59 59 #include "memory/resourceArea.hpp"
60 60 #include "memory/universe.hpp"
61 61 #include "oops/access.inline.hpp"
62 62 #include "oops/oop.inline.hpp"
63 63 #include "runtime/atomic.hpp"
64 64 #include "runtime/handles.inline.hpp"
65 65 #include "runtime/java.hpp"
66 66 #include "runtime/orderAccess.hpp"
67 67 #include "runtime/prefetch.inline.hpp"
68 68 #include "services/memTracker.hpp"
69 69 #include "utilities/align.hpp"
70 70 #include "utilities/growableArray.hpp"
71 71
72 72 bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
73 73 assert(addr < _cm->finger(), "invariant");
74 74 assert(addr >= _task->finger(), "invariant");
75 75
76 76   // We move this task's local finger along.
77 77 _task->move_finger_to(addr);
78 78
79 79 _task->scan_task_entry(G1TaskQueueEntry::from_oop(oop(addr)));
80 80 // we only partially drain the local queue and global stack
81 81 _task->drain_local_queue(true);
82 82 _task->drain_global_stack(true);
83 83
84 84 // if the has_aborted flag has been raised, we need to bail out of
85 85 // the iteration
86 86 return !_task->has_aborted();
87 87 }
88 88
89 89 G1CMMarkStack::G1CMMarkStack() :
90 90 _max_chunk_capacity(0),
91 91 _base(NULL),
92 92 _chunk_capacity(0) {
93 93 set_empty();
94 94 }
95 95
96 96 bool G1CMMarkStack::resize(size_t new_capacity) {
97 97 assert(is_empty(), "Only resize when stack is empty.");
98 98 assert(new_capacity <= _max_chunk_capacity,
99 99 "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);
100 100
101 101 TaskQueueEntryChunk* new_base = MmapArrayAllocator<TaskQueueEntryChunk>::allocate_or_null(new_capacity, mtGC);
102 102
103 103 if (new_base == NULL) {
104 104 log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(TaskQueueEntryChunk));
105 105 return false;
106 106 }
107 107 // Release old mapping.
108 108 if (_base != NULL) {
109 109 MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
110 110 }
111 111
112 112 _base = new_base;
113 113 _chunk_capacity = new_capacity;
114 114 set_empty();
115 115
116 116 return true;
117 117 }
118 118
119 119 size_t G1CMMarkStack::capacity_alignment() {
120 120 return (size_t)lcm(os::vm_allocation_granularity(), sizeof(TaskQueueEntryChunk)) / sizeof(G1TaskQueueEntry);
121 121 }
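
The alignment above is the least common multiple of the OS allocation granularity and the chunk size, converted to queue entries, so a resized backing array is always a whole number of both pages and chunks. A standalone sketch of that arithmetic, with hypothetical sizes standing in for os::vm_allocation_granularity(), sizeof(TaskQueueEntryChunk) and sizeof(G1TaskQueueEntry):

    #include <cstddef>
    #include <numeric>  // std::lcm (C++17)

    // Hypothetical sizes; the real values come from the OS and the chunk layout.
    static const size_t kGranularity = 4096;  // os::vm_allocation_granularity()
    static const size_t kChunkBytes  = 1032;  // sizeof(TaskQueueEntryChunk)
    static const size_t kEntryBytes  = 8;     // sizeof(G1TaskQueueEntry)

    // Mirrors capacity_alignment(): the smallest capacity step, in entries,
    // that is simultaneously page-aligned and chunk-aligned.
    size_t capacity_alignment_in_entries() {
      return std::lcm(kGranularity, kChunkBytes) / kEntryBytes;
    }
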
122 122
123 123 bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
124 124 guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");
125 125
126 126 size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);
127 127
128 128 _max_chunk_capacity = align_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
129 129 size_t initial_chunk_capacity = align_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
130 130
131 131 guarantee(initial_chunk_capacity <= _max_chunk_capacity,
132 132 "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
133 133 _max_chunk_capacity,
134 134 initial_chunk_capacity);
135 135
136 136 log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
137 137 initial_chunk_capacity, _max_chunk_capacity);
138 138
139 139 return resize(initial_chunk_capacity);
140 140 }
141 141
142 142 void G1CMMarkStack::expand() {
143 143 if (_chunk_capacity == _max_chunk_capacity) {
144 144 log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
145 145 return;
146 146 }
147 147 size_t old_capacity = _chunk_capacity;
148 148 // Double capacity if possible
149 149 size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);
150 150
151 151 if (resize(new_capacity)) {
152 152 log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
153 153 old_capacity, new_capacity);
154 154 } else {
155 155 log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
156 156 old_capacity, new_capacity);
157 157 }
158 158 }
159 159
160 160 G1CMMarkStack::~G1CMMarkStack() {
161 161 if (_base != NULL) {
162 162 MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
163 163 }
164 164 }
165 165
166 166 void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
167 167 elem->next = *list;
168 168 *list = elem;
169 169 }
170 170
171 171 void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
172 172 MutexLocker x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
173 173 add_chunk_to_list(&_chunk_list, elem);
174 174 _chunks_in_chunk_list++;
175 175 }
176 176
177 177 void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
178 178 MutexLocker x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
179 179 add_chunk_to_list(&_free_list, elem);
180 180 }
181 181
182 182 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
183 183 TaskQueueEntryChunk* result = *list;
184 184 if (result != NULL) {
185 185 *list = (*list)->next;
186 186 }
187 187 return result;
188 188 }
189 189
190 190 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
191 191 MutexLocker x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
192 192 TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list);
193 193 if (result != NULL) {
194 194 _chunks_in_chunk_list--;
195 195 }
196 196 return result;
197 197 }
198 198
199 199 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_free_list() {
200 200 MutexLocker x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
201 201 return remove_chunk_from_list(&_free_list);
202 202 }
203 203
204 204 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
205 205 // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
206 206 // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
207 207 // wraparound of _hwm.
208 208 if (_hwm >= _chunk_capacity) {
209 209 return NULL;
210 210 }
211 211
212 212 size_t cur_idx = Atomic::add(&_hwm, 1u) - 1;
213 213 if (cur_idx >= _chunk_capacity) {
214 214 return NULL;
215 215 }
216 216
217 217 TaskQueueEntryChunk* result = ::new (&_base[cur_idx]) TaskQueueEntryChunk;
218 218 result->next = NULL;
219 219 return result;
220 220 }
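
allocate_new_chunk() is a lock-free bump allocator: the initial read of _hwm is only a cheap filter, the atomic add is the authoritative claim, and the post-check discards over-claims (wasting at most one index per racing thread, as the comment notes). A minimal sketch of the same pattern using std::atomic; the names are illustrative, not the HotSpot API:

    #include <atomic>
    #include <cstddef>

    std::atomic<size_t> hwm{0};    // high-water mark into the backing array
    const size_t capacity = 1024;  // hypothetical chunk capacity

    // Returns a claimed index, or `capacity` to signal that the array is full.
    size_t claim_chunk_index() {
      if (hwm.load(std::memory_order_relaxed) >= capacity) {
        return capacity;           // dirty-read fast path; racing here is harmless
      }
      size_t idx = hwm.fetch_add(1);           // the authoritative claim
      return idx < capacity ? idx : capacity;  // losers of the race back off
    }
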
221 221
222 222 bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) {
223 223 // Get a new chunk.
224 224 TaskQueueEntryChunk* new_chunk = remove_chunk_from_free_list();
225 225
226 226 if (new_chunk == NULL) {
227 227 // Did not get a chunk from the free list. Allocate from backing memory.
228 228 new_chunk = allocate_new_chunk();
229 229
230 230 if (new_chunk == NULL) {
231 231 return false;
232 232 }
233 233 }
234 234
235 235 Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, EntriesPerChunk * sizeof(G1TaskQueueEntry));
236 236
237 237 add_chunk_to_chunk_list(new_chunk);
238 238
239 239 return true;
240 240 }
241 241
242 242 bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
243 243 TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list();
244 244
245 245 if (cur == NULL) {
246 246 return false;
247 247 }
248 248
249 249 Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry));
250 250
251 251 add_chunk_to_free_list(cur);
252 252 return true;
253 253 }
254 254
255 255 void G1CMMarkStack::set_empty() {
256 256 _chunks_in_chunk_list = 0;
257 257 _hwm = 0;
258 258 _chunk_list = NULL;
259 259 _free_list = NULL;
260 260 }
261 261
262 262 G1CMRootMemRegions::G1CMRootMemRegions(uint const max_regions) :
263 263 _root_regions(NULL),
264 264 _max_regions(max_regions),
265 265 _num_root_regions(0),
266 266 _claimed_root_regions(0),
267 267 _scan_in_progress(false),
268 268 _should_abort(false) {
269 269 _root_regions = new MemRegion[_max_regions];
270 270 if (_root_regions == NULL) {
271 271 vm_exit_during_initialization("Could not allocate root MemRegion set.");
272 272 }
273 273 }
274 274
275 275 G1CMRootMemRegions::~G1CMRootMemRegions() {
276 276 delete[] _root_regions;
277 277 }
278 278
279 279 void G1CMRootMemRegions::reset() {
280 280 _num_root_regions = 0;
281 281 }
282 282
283 283 void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) {
284 284 assert_at_safepoint();
285 285 size_t idx = Atomic::add(&_num_root_regions, (size_t)1) - 1;
286 286   assert(idx < _max_regions, "Trying to add more root MemRegions than there is space for, max " SIZE_FORMAT, _max_regions);
287 287   assert(start != NULL && end != NULL && start <= end, "Start (" PTR_FORMAT ") should be less than or equal to "
288 288 "end (" PTR_FORMAT ")", p2i(start), p2i(end));
289 289 _root_regions[idx].set_start(start);
290 290 _root_regions[idx].set_end(end);
291 291 }
292 292
293 293 void G1CMRootMemRegions::prepare_for_scan() {
294 294 assert(!scan_in_progress(), "pre-condition");
295 295
296 296 _scan_in_progress = _num_root_regions > 0;
297 297
298 298 _claimed_root_regions = 0;
299 299 _should_abort = false;
300 300 }
301 301
302 302 const MemRegion* G1CMRootMemRegions::claim_next() {
303 303 if (_should_abort) {
304 304 // If someone has set the should_abort flag, we return NULL to
305 305 // force the caller to bail out of their loop.
306 306 return NULL;
307 307 }
308 308
309 309 if (_claimed_root_regions >= _num_root_regions) {
310 310 return NULL;
311 311 }
312 312
313 313 size_t claimed_index = Atomic::add(&_claimed_root_regions, (size_t)1) - 1;
314 314 if (claimed_index < _num_root_regions) {
315 315 return &_root_regions[claimed_index];
316 316 }
317 317 return NULL;
318 318 }
319 319
320 320 uint G1CMRootMemRegions::num_root_regions() const {
321 321 return (uint)_num_root_regions;
322 322 }
323 323
324 324 void G1CMRootMemRegions::notify_scan_done() {
325 325 MutexLocker x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
326 326 _scan_in_progress = false;
327 327 RootRegionScan_lock->notify_all();
328 328 }
329 329
330 330 void G1CMRootMemRegions::cancel_scan() {
331 331 notify_scan_done();
332 332 }
333 333
334 334 void G1CMRootMemRegions::scan_finished() {
335 335 assert(scan_in_progress(), "pre-condition");
336 336
337 337 if (!_should_abort) {
338 338 assert(_claimed_root_regions >= num_root_regions(),
339 339 "we should have claimed all root regions, claimed " SIZE_FORMAT ", length = %u",
340 340 _claimed_root_regions, num_root_regions());
341 341 }
342 342
343 343 notify_scan_done();
344 344 }
345 345
346 346 bool G1CMRootMemRegions::wait_until_scan_finished() {
347 347 if (!scan_in_progress()) {
348 348 return false;
349 349 }
350 350
351 351 {
352 352 MonitorLocker ml(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
353 353 while (scan_in_progress()) {
354 354 ml.wait();
355 355 }
356 356 }
357 357 return true;
358 358 }
359 359
360 360 // Returns the maximum number of workers to be used in a concurrent
361 361 // phase based on the number of GC workers being used in a STW
362 362 // phase.
363 363 static uint scale_concurrent_worker_threads(uint num_gc_workers) {
364 364 return MAX2((num_gc_workers + 2) / 4, 1U);
365 365 }
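
The scaling above gives roughly one concurrent worker per four STW workers, rounded and floored at one: 4 STW workers map to 1, 8 to 2, 13 to 3. A self-contained check of that expression:

    #include <algorithm>
    #include <cassert>

    unsigned scale_workers(unsigned num_gc_workers) {
      return std::max((num_gc_workers + 2) / 4, 1u);  // same expression as above
    }

    void check_scaling() {
      assert(scale_workers(1)  == 1);  // never below one worker
      assert(scale_workers(4)  == 1);
      assert(scale_workers(8)  == 2);
      assert(scale_workers(13) == 3);
    }
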
366 366
367 367 G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
368 368 G1RegionToSpaceMapper* prev_bitmap_storage,
369 369 G1RegionToSpaceMapper* next_bitmap_storage) :
370 370 // _cm_thread set inside the constructor
371 371 _g1h(g1h),
372 372 _completed_initialization(false),
373 373
374 374 _mark_bitmap_1(),
375 375 _mark_bitmap_2(),
376 376 _prev_mark_bitmap(&_mark_bitmap_1),
377 377 _next_mark_bitmap(&_mark_bitmap_2),
378 378
379 379 _heap(_g1h->reserved_region()),
380 380
381 381 _root_regions(_g1h->max_regions()),
382 382
383 383 _global_mark_stack(),
384 384
385 385 // _finger set in set_non_marking_state
386 386
387 387 _worker_id_offset(G1DirtyCardQueueSet::num_par_ids() + G1ConcRefinementThreads),
388 388 _max_num_tasks(ParallelGCThreads),
389 389 // _num_active_tasks set in set_non_marking_state()
390 390 // _tasks set inside the constructor
391 391
392 392 _task_queues(new G1CMTaskQueueSet((int) _max_num_tasks)),
393 393 _terminator((int) _max_num_tasks, _task_queues),
394 394
395 395 _first_overflow_barrier_sync(),
396 396 _second_overflow_barrier_sync(),
397 397
398 398 _has_overflown(false),
399 399 _concurrent(false),
400 400 _has_aborted(false),
401 401 _restart_for_overflow(false),
402 402 _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
403 403 _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),
404 404
405 405 // _verbose_level set below
406 406
407 407 _init_times(),
408 408 _remark_times(),
409 409 _remark_mark_times(),
410 410 _remark_weak_ref_times(),
411 411 _cleanup_times(),
412 412 _total_cleanup_time(0.0),
413 413
414 414 _accum_task_vtime(NULL),
415 415
416 416 _concurrent_workers(NULL),
417 417 _num_concurrent_workers(0),
418 418 _max_concurrent_workers(0),
419 419
420 420 _region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_regions(), mtGC)),
421 421 _top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_regions(), mtGC))
422 422 {
423 423 _mark_bitmap_1.initialize(g1h->reserved_region(), prev_bitmap_storage);
424 424 _mark_bitmap_2.initialize(g1h->reserved_region(), next_bitmap_storage);
425 425
426 426 // Create & start ConcurrentMark thread.
427 427 _cm_thread = new G1ConcurrentMarkThread(this);
428 428 if (_cm_thread->osthread() == NULL) {
429 429 vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
430 430 }
431 431
432 432 assert(CGC_lock != NULL, "CGC_lock must be initialized");
433 433
434 434 if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
435 435 // Calculate the number of concurrent worker threads by scaling
436 436 // the number of parallel GC threads.
437 437 uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads);
438 438 FLAG_SET_ERGO(ConcGCThreads, marking_thread_num);
439 439 }
440 440
441 441 assert(ConcGCThreads > 0, "ConcGCThreads have been set.");
442 442 if (ConcGCThreads > ParallelGCThreads) {
443 443 log_warning(gc)("More ConcGCThreads (%u) than ParallelGCThreads (%u).",
444 444 ConcGCThreads, ParallelGCThreads);
445 445 return;
446 446 }
447 447
448 448 log_debug(gc)("ConcGCThreads: %u offset %u", ConcGCThreads, _worker_id_offset);
449 449 log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);
450 450
451 451 _num_concurrent_workers = ConcGCThreads;
452 452 _max_concurrent_workers = _num_concurrent_workers;
453 453
454 454 _concurrent_workers = new WorkGang("G1 Conc", _max_concurrent_workers, false, true);
455 455 _concurrent_workers->initialize_workers();
456 456
457 457 if (FLAG_IS_DEFAULT(MarkStackSize)) {
458 458 size_t mark_stack_size =
459 459 MIN2(MarkStackSizeMax,
460 460 MAX2(MarkStackSize, (size_t) (_max_concurrent_workers * TASKQUEUE_SIZE)));
461 461 // Verify that the calculated value for MarkStackSize is in range.
462 462 // It would be nice to use the private utility routine from Arguments.
463 463 if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
464 464 log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
465 465 "must be between 1 and " SIZE_FORMAT,
466 466 mark_stack_size, MarkStackSizeMax);
467 467 return;
468 468 }
469 469 FLAG_SET_ERGO(MarkStackSize, mark_stack_size);
470 470 } else {
471 471 // Verify MarkStackSize is in range.
472 472 if (FLAG_IS_CMDLINE(MarkStackSize)) {
473 473 if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
474 474 if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
475 475 log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
476 476 "must be between 1 and " SIZE_FORMAT,
477 477 MarkStackSize, MarkStackSizeMax);
478 478 return;
479 479 }
480 480 } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
481 481 if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
482 482 log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
483 483 " or for MarkStackSizeMax (" SIZE_FORMAT ")",
484 484 MarkStackSize, MarkStackSizeMax);
485 485 return;
486 486 }
487 487 }
488 488 }
489 489 }
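
When MarkStackSize is left at its default, the ergonomic value scales the per-worker task queue capacity by the worker count and clamps the result into [MarkStackSize, MarkStackSizeMax]. A hedged sketch of that clamp, with made-up stand-ins for the VM flags and TASKQUEUE_SIZE:

    #include <algorithm>
    #include <cstddef>

    // Hypothetical stand-ins; the real values are VM flags and a build constant.
    const size_t kMarkStackSize    = 4096;              // MarkStackSize
    const size_t kMarkStackSizeMax = 16 * 1024 * 1024;  // MarkStackSizeMax
    const size_t kTaskQueueSize    = 128 * 1024;        // TASKQUEUE_SIZE

    size_t ergonomic_mark_stack_size(size_t max_workers) {
      return std::min(kMarkStackSizeMax,
                      std::max(kMarkStackSize, max_workers * kTaskQueueSize));
    }
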
490 490
491 491 if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) {
492 492 vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
493 493 }
494 494
495 495 _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_num_tasks, mtGC);
496 496 _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_num_tasks, mtGC);
497 497
498 498 // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
499 499 _num_active_tasks = _max_num_tasks;
500 500
501 501 for (uint i = 0; i < _max_num_tasks; ++i) {
502 502 G1CMTaskQueue* task_queue = new G1CMTaskQueue();
503 503 task_queue->initialize();
504 504 _task_queues->register_queue(i, task_queue);
505 505
506 506 _tasks[i] = new G1CMTask(i, this, task_queue, _region_mark_stats, _g1h->max_regions());
507 507
508 508 _accum_task_vtime[i] = 0.0;
509 509 }
510 510
511 511 reset_at_marking_complete();
512 512 _completed_initialization = true;
513 513 }
514 514
515 515 void G1ConcurrentMark::reset() {
516 516 _has_aborted = false;
517 517
518 518 reset_marking_for_restart();
519 519
520 520   // Reset all tasks, since different phases will use a different number of
521 521   // active threads. So, it's easiest to have all of them ready.
522 522 for (uint i = 0; i < _max_num_tasks; ++i) {
523 523 _tasks[i]->reset(_next_mark_bitmap);
524 524 }
525 525
526 526 uint max_regions = _g1h->max_regions();
527 527 for (uint i = 0; i < max_regions; i++) {
528 528 _top_at_rebuild_starts[i] = NULL;
529 529 _region_mark_stats[i].clear();
530 530 }
531 531 }
532 532
533 533 void G1ConcurrentMark::clear_statistics_in_region(uint region_idx) {
534 534 for (uint j = 0; j < _max_num_tasks; ++j) {
535 535 _tasks[j]->clear_mark_stats_cache(region_idx);
536 536 }
537 537 _top_at_rebuild_starts[region_idx] = NULL;
538 538 _region_mark_stats[region_idx].clear();
539 539 }
540 540
541 541 void G1ConcurrentMark::clear_statistics(HeapRegion* r) {
542 542 uint const region_idx = r->hrm_index();
543 543 if (r->is_humongous()) {
544 544     assert(r->is_starts_humongous(), "Got continues humongous region here");
545 545 uint const size_in_regions = (uint)_g1h->humongous_obj_size_in_regions(oop(r->humongous_start_region()->bottom())->size());
546 546 for (uint j = region_idx; j < (region_idx + size_in_regions); j++) {
547 547 clear_statistics_in_region(j);
548 548 }
549 549 } else {
550 550 clear_statistics_in_region(region_idx);
551 551 }
552 552 }
553 553
554 554 static void clear_mark_if_set(G1CMBitMap* bitmap, HeapWord* addr) {
555 555 if (bitmap->is_marked(addr)) {
556 556 bitmap->clear(addr);
557 557 }
558 558 }
559 559
560 560 void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
561 561 assert_at_safepoint_on_vm_thread();
562 562
563 563 // Need to clear all mark bits of the humongous object.
564 564 clear_mark_if_set(_prev_mark_bitmap, r->bottom());
565 565 clear_mark_if_set(_next_mark_bitmap, r->bottom());
566 566
567 567 if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
568 568 return;
569 569 }
570 570
571 571 // Clear any statistics about the region gathered so far.
572 572 clear_statistics(r);
573 573 }
574 574
575 575 void G1ConcurrentMark::reset_marking_for_restart() {
576 576 _global_mark_stack.set_empty();
577 577
578 578 // Expand the marking stack, if we have to and if we can.
579 579 if (has_overflown()) {
580 580 _global_mark_stack.expand();
581 581
582 582 uint max_regions = _g1h->max_regions();
583 583 for (uint i = 0; i < max_regions; i++) {
584 584 _region_mark_stats[i].clear_during_overflow();
585 585 }
586 586 }
587 587
588 588 clear_has_overflown();
589 589 _finger = _heap.start();
590 590
591 591 for (uint i = 0; i < _max_num_tasks; ++i) {
592 592 G1CMTaskQueue* queue = _task_queues->queue(i);
593 593 queue->set_empty();
594 594 }
595 595 }
596 596
597 597 void G1ConcurrentMark::set_concurrency(uint active_tasks) {
598 598 assert(active_tasks <= _max_num_tasks, "we should not have more");
599 599
600 600 _num_active_tasks = active_tasks;
601 601 // Need to update the three data structures below according to the
602 602 // number of active threads for this phase.
603 603 _terminator.terminator()->reset_for_reuse((int) active_tasks);
604 604 _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
605 605 _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
606 606 }
607 607
608 608 void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
609 609 set_concurrency(active_tasks);
610 610
611 611 _concurrent = concurrent;
612 612
613 613 if (!concurrent) {
614 614 // At this point we should be in a STW phase, and completed marking.
615 615 assert_at_safepoint_on_vm_thread();
616 616 assert(out_of_regions(),
617 617 "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
618 618 p2i(_finger), p2i(_heap.end()));
619 619 }
620 620 }
621 621
622 622 void G1ConcurrentMark::reset_at_marking_complete() {
623 623 // We set the global marking state to some default values when we're
624 624 // not doing marking.
625 625 reset_marking_for_restart();
626 626 _num_active_tasks = 0;
627 627 }
628 628
629 629 G1ConcurrentMark::~G1ConcurrentMark() {
630 630 FREE_C_HEAP_ARRAY(HeapWord*, _top_at_rebuild_starts);
631 631 FREE_C_HEAP_ARRAY(G1RegionMarkStats, _region_mark_stats);
632 632 // The G1ConcurrentMark instance is never freed.
633 633 ShouldNotReachHere();
634 634 }
635 635
636 636 class G1ClearBitMapTask : public AbstractGangTask {
637 637 public:
638 638 static size_t chunk_size() { return M; }
639 639
640 640 private:
641 641 // Heap region closure used for clearing the given mark bitmap.
642 642 class G1ClearBitmapHRClosure : public HeapRegionClosure {
643 643 private:
644 644 G1CMBitMap* _bitmap;
645 645 G1ConcurrentMark* _cm;
646 646 public:
647 647 G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _bitmap(bitmap), _cm(cm) {
648 648 }
649 649
650 650 virtual bool do_heap_region(HeapRegion* r) {
651 651 size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;
652 652
653 653 HeapWord* cur = r->bottom();
654 654 HeapWord* const end = r->end();
655 655
656 656 while (cur < end) {
657 657 MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
658 658 _bitmap->clear_range(mr);
659 659
660 660 cur += chunk_size_in_words;
661 661
662 662 // Abort iteration if after yielding the marking has been aborted.
663 663 if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
664 664 return true;
665 665 }
666 666 // Repeat the asserts from before the start of the closure. We will do them
667 667 // as asserts here to minimize their overhead on the product. However, we
668 668 // will have them as guarantees at the beginning / end of the bitmap
669 669 // clearing to get some checking in the product.
670 670 assert(_cm == NULL || _cm->cm_thread()->during_cycle(), "invariant");
671 671 assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_or_rebuild_in_progress(), "invariant");
672 672 }
673 673 assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());
674 674
675 675 return false;
676 676 }
677 677 };
678 678
679 679 G1ClearBitmapHRClosure _cl;
680 680 HeapRegionClaimer _hr_claimer;
681 681 bool _suspendible; // If the task is suspendible, workers must join the STS.
682 682
683 683 public:
684 684 G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
685 685 AbstractGangTask("G1 Clear Bitmap"),
686 686 _cl(bitmap, suspendible ? cm : NULL),
687 687 _hr_claimer(n_workers),
688 688 _suspendible(suspendible)
689 689 { }
690 690
691 691 void work(uint worker_id) {
692 692 SuspendibleThreadSetJoiner sts_join(_suspendible);
693 693 G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&_cl, &_hr_claimer, worker_id);
694 694 }
695 695
696 696 bool is_complete() {
697 697 return _cl.is_complete();
698 698 }
699 699 };
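
G1ClearBitmapHRClosure clears each region in chunk_size() steps precisely so a worker can yield to a safepoint between chunks instead of occupying the suspendible thread set for a whole region. A simplified, standalone version of that loop shape; yield_requested() is a stand-in for the yield/abort checks:

    #include <algorithm>
    #include <cstddef>
    #include <cstring>

    bool yield_requested();  // stand-in for the suspendible-thread-set yield check

    // Clears [bottom, end) in chunk-sized steps, checking for a requested
    // yield after every chunk; returns true if it aborted early.
    bool clear_in_chunks(char* bottom, char* end, size_t chunk_bytes) {
      for (char* cur = bottom; cur < end; cur += chunk_bytes) {
        size_t len = std::min(chunk_bytes, (size_t)(end - cur));
        std::memset(cur, 0, len);  // stands in for _bitmap->clear_range(mr)
        if (yield_requested()) {   // only between chunks, never mid-chunk
          return true;
        }
      }
      return false;
    }
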
700 700
701 701 void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
702 702 assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");
703 703
704 704 size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
705 705 size_t const num_chunks = align_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();
706 706
707 707 uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
708 708
709 709 G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);
710 710
711 711 log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
712 712 workers->run_task(&cl, num_workers);
713 713 guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
714 714 }
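
The worker count here is bounded by the number of work units: num_chunks is the ceiling of the bytes to clear over chunk_size() (M, i.e. 1M bytes), and there is no point waking more workers than there are chunks. The same sizing arithmetic in isolation:

    #include <algorithm>
    #include <cstddef>

    const size_t kChunkSize = 1024 * 1024;  // G1ClearBitMapTask::chunk_size() is M

    size_t workers_for_clear(size_t bytes_to_clear, size_t active_workers) {
      size_t num_chunks = (bytes_to_clear + kChunkSize - 1) / kChunkSize;  // align_up / chunk
      return std::min(num_chunks, active_workers);
    }
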
715 715
716 716 void G1ConcurrentMark::cleanup_for_next_mark() {
717 717   // Make sure that the concurrent mark thread still appears to be in
718 718   // the current cycle.
719 719 guarantee(cm_thread()->during_cycle(), "invariant");
720 720
721 721 // We are finishing up the current cycle by clearing the next
722 722 // marking bitmap and getting it ready for the next cycle. During
723 723 // this time no other cycle can start. So, let's make sure that this
724 724 // is the case.
725 725 guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
726 726
727 727 clear_bitmap(_next_mark_bitmap, _concurrent_workers, true);
728 728
729 729 // Repeat the asserts from above.
730 730 guarantee(cm_thread()->during_cycle(), "invariant");
731 731 guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
732 732 }
733 733
734 734 void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
735 735 assert_at_safepoint_on_vm_thread();
736 736 clear_bitmap(_prev_mark_bitmap, workers, false);
737 737 }
738 738
739 739 class NoteStartOfMarkHRClosure : public HeapRegionClosure {
740 740 public:
741 741 bool do_heap_region(HeapRegion* r) {
742 742 r->note_start_of_marking();
743 743 return false;
744 744 }
745 745 };
746 746
747 747 void G1ConcurrentMark::pre_initial_mark() {
748 748 assert_at_safepoint_on_vm_thread();
749 749
750 750 // Reset marking state.
751 751 reset();
752 752
753 753 // For each region note start of marking.
754 754 NoteStartOfMarkHRClosure startcl;
755 755 _g1h->heap_region_iterate(&startcl);
756 756
757 757 _root_regions.reset();
758 758 }
759 759
760 760
761 761 void G1ConcurrentMark::post_initial_mark() {
762 762 // Start Concurrent Marking weak-reference discovery.
763 763 ReferenceProcessor* rp = _g1h->ref_processor_cm();
764 764 // enable ("weak") refs discovery
765 765 rp->enable_discovery();
766 766 rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
767 767
768 768 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
769 769   // This is the start of the marking cycle; we expect all
770 770   // threads to have SATB queues with active set to false.
771 771 satb_mq_set.set_active_all_threads(true, /* new active value */
772 772 false /* expected_active */);
773 773
774 774 _root_regions.prepare_for_scan();
775 775
776 776 // update_g1_committed() will be called at the end of an evac pause
777 777 // when marking is on. So, it's also called at the end of the
778 778 // initial-mark pause to update the heap end, if the heap expands
779 779 // during it. No need to call it here.
780 780 }
781 781
782 782 /*
783 783 * Notice that in the next two methods, we actually leave the STS
784 784 * during the barrier sync and join it immediately afterwards. If we
785 785  * do not do this, the following deadlock can occur: one thread could
786 786  * be in the barrier sync code, waiting for the other threads to
787 787  * sync up, while another could be trying to yield and is itself
788 788  * waiting for the threads to sync up too.
789 789 *
790 790 * Note, however, that this code is also used during remark and in
791 791 * this case we should not attempt to leave / enter the STS, otherwise
792 792 * we'll either hit an assert (debug / fastdebug) or deadlock
793 793 * (product). So we should only leave / enter the STS if we are
794 794 * operating concurrently.
795 795 *
796 796  * Because the thread that does the sync barrier has left the STS, it
797 797  * is possible for a Full GC or an evacuation pause to occur while it
798 798  * waits. This is actually safe, since entering the sync
799 799  * barrier is one of the last things do_marking_step() does, and it
800 800  * doesn't manipulate any data structures afterwards.
801 801 */
802 802
803 803 void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
804 804 bool barrier_aborted;
805 805 {
806 806 SuspendibleThreadSetLeaver sts_leave(concurrent());
807 807 barrier_aborted = !_first_overflow_barrier_sync.enter();
808 808 }
809 809
810 810 // at this point everyone should have synced up and not be doing any
811 811 // more work
812 812
813 813 if (barrier_aborted) {
814 814 // If the barrier aborted we ignore the overflow condition and
815 815 // just abort the whole marking phase as quickly as possible.
816 816 return;
817 817 }
818 818 }
819 819
820 820 void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
821 821 SuspendibleThreadSetLeaver sts_leave(concurrent());
822 822 _second_overflow_barrier_sync.enter();
823 823
824 824 // at this point everything should be re-initialized and ready to go
825 825 }
826 826
827 827 class G1CMConcurrentMarkingTask : public AbstractGangTask {
828 828 G1ConcurrentMark* _cm;
829 829
830 830 public:
831 831 void work(uint worker_id) {
832 832 assert(Thread::current()->is_ConcurrentGC_thread(), "Not a concurrent GC thread");
833 833 ResourceMark rm;
834 834
835 835 double start_vtime = os::elapsedVTime();
836 836
837 837 {
838 838 SuspendibleThreadSetJoiner sts_join;
839 839
840 840 assert(worker_id < _cm->active_tasks(), "invariant");
841 841
842 842 G1CMTask* task = _cm->task(worker_id);
843 843 task->record_start_time();
844 844 if (!_cm->has_aborted()) {
845 845 do {
846 846 task->do_marking_step(G1ConcMarkStepDurationMillis,
847 847 true /* do_termination */,
848 848 false /* is_serial*/);
849 849
850 850 _cm->do_yield_check();
851 851 } while (!_cm->has_aborted() && task->has_aborted());
852 852 }
853 853 task->record_end_time();
854 854 guarantee(!task->has_aborted() || _cm->has_aborted(), "invariant");
855 855 }
856 856
857 857 double end_vtime = os::elapsedVTime();
858 858 _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
859 859 }
860 860
861 861 G1CMConcurrentMarkingTask(G1ConcurrentMark* cm) :
862 862 AbstractGangTask("Concurrent Mark"), _cm(cm) { }
863 863
864 864 ~G1CMConcurrentMarkingTask() { }
865 865 };
866 866
867 867 uint G1ConcurrentMark::calc_active_marking_workers() {
868 868 uint result = 0;
869 869 if (!UseDynamicNumberOfGCThreads ||
870 870 (!FLAG_IS_DEFAULT(ConcGCThreads) &&
871 871 !ForceDynamicNumberOfGCThreads)) {
872 872 result = _max_concurrent_workers;
873 873 } else {
874 874 result =
875 875 WorkerPolicy::calc_default_active_workers(_max_concurrent_workers,
876 876 1, /* Minimum workers */
877 877 _num_concurrent_workers,
878 878 Threads::number_of_non_daemon_threads());
879 879 // Don't scale the result down by scale_concurrent_workers() because
880 880 // that scaling has already gone into "_max_concurrent_workers".
881 881 }
882 882 assert(result > 0 && result <= _max_concurrent_workers,
883 883 "Calculated number of marking workers must be larger than zero and at most the maximum %u, but is %u",
884 884 _max_concurrent_workers, result);
885 885 return result;
886 886 }
887 887
888 888 void G1ConcurrentMark::scan_root_region(const MemRegion* region, uint worker_id) {
889 889 #ifdef ASSERT
890 890 HeapWord* last = region->last();
891 891 HeapRegion* hr = _g1h->heap_region_containing(last);
892 892 assert(hr->is_old() || hr->next_top_at_mark_start() == hr->bottom(),
893 893 "Root regions must be old or survivor/eden but region %u is %s", hr->hrm_index(), hr->get_type_str());
894 894 assert(hr->next_top_at_mark_start() == region->start(),
895 895 "MemRegion start should be equal to nTAMS");
896 896 #endif
897 897
898 898 G1RootRegionScanClosure cl(_g1h, this, worker_id);
899 899
900 900 const uintx interval = PrefetchScanIntervalInBytes;
901 901 HeapWord* curr = region->start();
902 902 const HeapWord* end = region->end();
903 903 while (curr < end) {
904 904 Prefetch::read(curr, interval);
905 905 oop obj = oop(curr);
906 906 int size = obj->oop_iterate_size(&cl);
907 907 assert(size == obj->size(), "sanity");
908 908 curr += size;
909 909 }
910 910 }
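
The scan itself is a linear walk over contiguous objects: prefetch a fixed interval ahead, apply the closure, then advance by the object's size. A schematic version with a hypothetical Object type; __builtin_prefetch is the GCC/Clang intrinsic used here in place of HotSpot's Prefetch::read:

    #include <cstddef>

    struct Object {
      size_t size_in_bytes;  // hypothetical; real oops derive size from the klass
    };

    void visit(Object* obj);  // stand-in for obj->oop_iterate_size(&cl)

    void scan_linear(char* start, char* end, size_t prefetch_interval) {
      for (char* cur = start; cur < end; ) {
        __builtin_prefetch(cur + prefetch_interval);  // read-ahead, as Prefetch::read
        Object* obj = (Object*)cur;
        visit(obj);
        cur += obj->size_in_bytes;  // objects are laid out back to back
      }
    }
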
911 911
912 912 class G1CMRootRegionScanTask : public AbstractGangTask {
913 913 G1ConcurrentMark* _cm;
914 914 public:
915 915 G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
916 916 AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }
917 917
918 918 void work(uint worker_id) {
919 919 assert(Thread::current()->is_ConcurrentGC_thread(),
920 920 "this should only be done by a conc GC thread");
921 921
922 922 G1CMRootMemRegions* root_regions = _cm->root_regions();
923 923 const MemRegion* region = root_regions->claim_next();
924 924 while (region != NULL) {
925 925 _cm->scan_root_region(region, worker_id);
926 926 region = root_regions->claim_next();
927 927 }
928 928 }
929 929 };
930 930
931 931 void G1ConcurrentMark::scan_root_regions() {
932 932 // scan_in_progress() will have been set to true only if there was
933 933 // at least one root region to scan. So, if it's false, we
934 934 // should not attempt to do any further work.
935 935 if (root_regions()->scan_in_progress()) {
936 936 assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");
937 937
938 938 _num_concurrent_workers = MIN2(calc_active_marking_workers(),
939 939 // We distribute work on a per-region basis, so starting
940 940 // more threads than that is useless.
941 941 root_regions()->num_root_regions());
942 942 assert(_num_concurrent_workers <= _max_concurrent_workers,
943 943 "Maximum number of marking threads exceeded");
944 944
945 945 G1CMRootRegionScanTask task(this);
946 946 log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
947 947 task.name(), _num_concurrent_workers, root_regions()->num_root_regions());
948 948 _concurrent_workers->run_task(&task, _num_concurrent_workers);
949 949
950 950 // It's possible that has_aborted() is true here without actually
951 951 // aborting the survivor scan earlier. This is OK as it's
952 952 // mainly used for sanity checking.
953 953 root_regions()->scan_finished();
954 954 }
955 955 }
956 956
957 957 void G1ConcurrentMark::concurrent_cycle_start() {
958 958 _gc_timer_cm->register_gc_start();
959 959
960 960 _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());
961 961
962 962 _g1h->trace_heap_before_gc(_gc_tracer_cm);
963 963 }
964 964
965 965 void G1ConcurrentMark::concurrent_cycle_end() {
966 966 _g1h->collector_state()->set_clearing_next_bitmap(false);
967 967
968 968 _g1h->trace_heap_after_gc(_gc_tracer_cm);
969 969
970 970 if (has_aborted()) {
971 971 log_info(gc, marking)("Concurrent Mark Abort");
972 972 _gc_tracer_cm->report_concurrent_mode_failure();
973 973 }
974 974
975 975 _gc_timer_cm->register_gc_end();
976 976
977 977 _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
978 978 }
979 979
980 980 void G1ConcurrentMark::mark_from_roots() {
981 981 _restart_for_overflow = false;
982 982
983 983 _num_concurrent_workers = calc_active_marking_workers();
984 984
985 985 uint active_workers = MAX2(1U, _num_concurrent_workers);
986 986
987 987 // Setting active workers is not guaranteed since fewer
988 988 // worker threads may currently exist and more may not be
989 989 // available.
990 990 active_workers = _concurrent_workers->update_active_workers(active_workers);
991 991 log_info(gc, task)("Using %u workers of %u for marking", active_workers, _concurrent_workers->total_workers());
992 992
993 993 // Parallel task terminator is set in "set_concurrency_and_phase()"
994 994 set_concurrency_and_phase(active_workers, true /* concurrent */);
995 995
996 996 G1CMConcurrentMarkingTask marking_task(this);
997 997 _concurrent_workers->run_task(&marking_task);
998 998 print_stats();
999 999 }
1000 1000
1001 1001 void G1ConcurrentMark::verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller) {
1002 1002 G1HeapVerifier* verifier = _g1h->verifier();
1003 1003
1004 1004 verifier->verify_region_sets_optional();
1005 1005
1006 1006 if (VerifyDuringGC) {
1007 1007 GCTraceTime(Debug, gc, phases) debug(caller, _gc_timer_cm);
1008 1008
1009 1009 size_t const BufLen = 512;
1010 1010 char buffer[BufLen];
1011 1011
1012 1012 jio_snprintf(buffer, BufLen, "During GC (%s)", caller);
1013 1013 verifier->verify(type, vo, buffer);
1014 1014 }
1015 1015
1016 1016 verifier->check_bitmaps(caller);
1017 1017 }
1018 1018
1019 1019 class G1UpdateRemSetTrackingBeforeRebuildTask : public AbstractGangTask {
1020 1020 G1CollectedHeap* _g1h;
1021 1021 G1ConcurrentMark* _cm;
1022 1022 HeapRegionClaimer _hrclaimer;
1023 1023 uint volatile _total_selected_for_rebuild;
1024 1024
1025 1025 G1PrintRegionLivenessInfoClosure _cl;
1026 1026
1027 1027 class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
1028 1028 G1CollectedHeap* _g1h;
1029 1029 G1ConcurrentMark* _cm;
1030 1030
1031 1031 G1PrintRegionLivenessInfoClosure* _cl;
1032 1032
1033 1033 uint _num_regions_selected_for_rebuild; // The number of regions actually selected for rebuild.
1034 1034
1035 1035 void update_remset_before_rebuild(HeapRegion* hr) {
1036 1036 G1RemSetTrackingPolicy* tracking_policy = _g1h->policy()->remset_tracker();
1037 1037
1038 1038 bool selected_for_rebuild;
1039 1039 if (hr->is_humongous()) {
1040 1040 bool const is_live = _cm->liveness(hr->humongous_start_region()->hrm_index()) > 0;
1041 1041 selected_for_rebuild = tracking_policy->update_humongous_before_rebuild(hr, is_live);
1042 1042 } else {
1043 1043 size_t const live_bytes = _cm->liveness(hr->hrm_index());
1044 1044 selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
1045 1045 }
1046 1046 if (selected_for_rebuild) {
1047 1047 _num_regions_selected_for_rebuild++;
1048 1048 }
1049 1049 _cm->update_top_at_rebuild_start(hr);
1050 1050 }
1051 1051
1052 1052 // Distribute the given words across the humongous object starting with hr and
1053 1053 // note end of marking.
1054 1054 void distribute_marked_bytes(HeapRegion* hr, size_t marked_words) {
1055 1055 uint const region_idx = hr->hrm_index();
1056 1056 size_t const obj_size_in_words = (size_t)oop(hr->bottom())->size();
1057 1057 uint const num_regions_in_humongous = (uint)G1CollectedHeap::humongous_obj_size_in_regions(obj_size_in_words);
1058 1058
1059 1059 // "Distributing" zero words means that we only note end of marking for these
1060 1060 // regions.
1061 1061 assert(marked_words == 0 || obj_size_in_words == marked_words,
1062 1062 "Marked words should either be 0 or the same as humongous object (" SIZE_FORMAT ") but is " SIZE_FORMAT,
1063 1063 obj_size_in_words, marked_words);
1064 1064
1065 1065 for (uint i = region_idx; i < (region_idx + num_regions_in_humongous); i++) {
1066 1066 HeapRegion* const r = _g1h->region_at(i);
1067 1067 size_t const words_to_add = MIN2(HeapRegion::GrainWords, marked_words);
1068 1068
1069 1069 log_trace(gc, marking)("Adding " SIZE_FORMAT " words to humongous region %u (%s)",
1070 1070 words_to_add, i, r->get_type_str());
1071 1071 add_marked_bytes_and_note_end(r, words_to_add * HeapWordSize);
1072 1072 marked_words -= words_to_add;
1073 1073 }
1074 1074 assert(marked_words == 0,
1075 1075 SIZE_FORMAT " words left after distributing space across %u regions",
1076 1076 marked_words, num_regions_in_humongous);
1077 1077 }
1078 1078
1079 1079 void update_marked_bytes(HeapRegion* hr) {
1080 1080 uint const region_idx = hr->hrm_index();
1081 1081 size_t const marked_words = _cm->liveness(region_idx);
1082 1082 // The marking attributes the object's size completely to the humongous starts
1083 1083 // region. We need to distribute this value across the entire set of regions a
1084 1084 // humongous object spans.
1085 1085 if (hr->is_humongous()) {
1086 1086 assert(hr->is_starts_humongous() || marked_words == 0,
1087 1087 "Should not have marked words " SIZE_FORMAT " in non-starts humongous region %u (%s)",
1088 1088 marked_words, region_idx, hr->get_type_str());
1089 1089 if (hr->is_starts_humongous()) {
1090 1090 distribute_marked_bytes(hr, marked_words);
1091 1091 }
1092 1092 } else {
1093 1093 log_trace(gc, marking)("Adding " SIZE_FORMAT " words to region %u (%s)", marked_words, region_idx, hr->get_type_str());
1094 1094 add_marked_bytes_and_note_end(hr, marked_words * HeapWordSize);
1095 1095 }
1096 1096 }
1097 1097
1098 1098 void add_marked_bytes_and_note_end(HeapRegion* hr, size_t marked_bytes) {
1099 1099 hr->add_to_marked_bytes(marked_bytes);
1100 1100 _cl->do_heap_region(hr);
1101 1101 hr->note_end_of_marking();
1102 1102 }
1103 1103
1104 1104 public:
1105 1105 G1UpdateRemSetTrackingBeforeRebuild(G1CollectedHeap* g1h, G1ConcurrentMark* cm, G1PrintRegionLivenessInfoClosure* cl) :
1106 1106 _g1h(g1h), _cm(cm), _cl(cl), _num_regions_selected_for_rebuild(0) { }
1107 1107
1108 1108 virtual bool do_heap_region(HeapRegion* r) {
1109 1109 update_remset_before_rebuild(r);
1110 1110 update_marked_bytes(r);
1111 1111
1112 1112 return false;
1113 1113 }
1114 1114
1115 1115 uint num_selected_for_rebuild() const { return _num_regions_selected_for_rebuild; }
1116 1116 };
1117 1117
1118 1118 public:
1119 1119 G1UpdateRemSetTrackingBeforeRebuildTask(G1CollectedHeap* g1h, G1ConcurrentMark* cm, uint num_workers) :
1120 1120 AbstractGangTask("G1 Update RemSet Tracking Before Rebuild"),
1121 1121 _g1h(g1h), _cm(cm), _hrclaimer(num_workers), _total_selected_for_rebuild(0), _cl("Post-Marking") { }
1122 1122
1123 1123 virtual void work(uint worker_id) {
1124 1124 G1UpdateRemSetTrackingBeforeRebuild update_cl(_g1h, _cm, &_cl);
1125 1125 _g1h->heap_region_par_iterate_from_worker_offset(&update_cl, &_hrclaimer, worker_id);
1126 1126 Atomic::add(&_total_selected_for_rebuild, update_cl.num_selected_for_rebuild());
1127 1127 }
1128 1128
1129 1129 uint total_selected_for_rebuild() const { return _total_selected_for_rebuild; }
1130 1130
1131 1131 // Number of regions for which roughly one thread should be spawned for this work.
1132 1132 static const uint RegionsPerThread = 384;
1133 1133 };
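
A note on distribute_marked_bytes() above: marking credits a humongous object's entire size to its starts region, so liveness has to be re-spread across every region the object spans, one full grain of words per region with the tail taking the remainder; nothing may be left over afterwards, as the assert checks. A worked sketch with a hypothetical grain size:

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    const size_t kGrainWords = 524288;  // hypothetical HeapRegion::GrainWords

    // Spreads marked_words across the regions a humongous object spans,
    // one full grain at a time; the tail region takes whatever is left.
    std::vector<size_t> distribute(size_t marked_words, size_t num_regions) {
      std::vector<size_t> per_region(num_regions, 0);
      for (size_t i = 0; i < num_regions; i++) {
        size_t words = std::min(kGrainWords, marked_words);
        per_region[i] = words;
        marked_words -= words;
      }
      // marked_words is now zero, mirroring the assert in the real code.
      return per_region;  // e.g. distribute(3 * kGrainWords / 2, 2)
                          //      -> { kGrainWords, kGrainWords / 2 }
    }
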
1134 1134
1135 1135 class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure {
1136 1136 G1CollectedHeap* _g1h;
1137 1137 public:
1138 1138 G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { }
1139 1139
1140 1140 virtual bool do_heap_region(HeapRegion* r) {
1141 1141 _g1h->policy()->remset_tracker()->update_after_rebuild(r);
1142 1142 return false;
1143 1143 }
1144 1144 };
1145 1145
1146 1146 void G1ConcurrentMark::remark() {
1147 1147 assert_at_safepoint_on_vm_thread();
1148 1148
1149 1149   // If a full collection has happened, we should not continue. However, we
1150 1150   // might have ended up here because the Remark VM operation was already scheduled.
1151 1151 if (has_aborted()) {
1152 1152 return;
1153 1153 }
1154 1154
1155 1155 G1Policy* policy = _g1h->policy();
1156 1156 policy->record_concurrent_mark_remark_start();
1157 1157
1158 1158 double start = os::elapsedTime();
1159 1159
1160 1160 verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark before");
1161 1161
1162 1162 {
1163 1163 GCTraceTime(Debug, gc, phases) debug("Finalize Marking", _gc_timer_cm);
1164 1164 finalize_marking();
1165 1165 }
1166 1166
1167 1167 double mark_work_end = os::elapsedTime();
1168 1168
1169 1169 bool const mark_finished = !has_overflown();
1170 1170 if (mark_finished) {
1171 1171 weak_refs_work(false /* clear_all_soft_refs */);
1172 1172
1173 1173 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
1174 1174 // We're done with marking.
1175 1175     // This is the end of the marking cycle; we expect all
1176 1176     // threads to have SATB queues with active set to true.
1177 1177 satb_mq_set.set_active_all_threads(false, /* new active value */
1178 1178 true /* expected_active */);
1179 1179
1180 1180 {
1181 1181 GCTraceTime(Debug, gc, phases) debug("Flush Task Caches", _gc_timer_cm);
1182 1182 flush_all_task_caches();
1183 1183 }
1184 1184
1185 1185 // Install newly created mark bitmap as "prev".
1186 1186 swap_mark_bitmaps();
1187 1187 {
1188 1188 GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking Before Rebuild", _gc_timer_cm);
1189 1189
1190 1190 uint const workers_by_capacity = (_g1h->num_regions() + G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread - 1) /
1191 1191 G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread;
1192 1192 uint const num_workers = MIN2(_g1h->workers()->active_workers(), workers_by_capacity);
1193 1193
1194 1194 G1UpdateRemSetTrackingBeforeRebuildTask cl(_g1h, this, num_workers);
1195 1195 log_debug(gc,ergo)("Running %s using %u workers for %u regions in heap", cl.name(), num_workers, _g1h->num_regions());
1196 1196 _g1h->workers()->run_task(&cl, num_workers);
1197 1197
1198 1198 log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
1199 1199 _g1h->num_regions(), cl.total_selected_for_rebuild());
1200 1200 }
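
RegionsPerThread gives each rebuild-tracking worker a budget of 384 regions: the pause sizes its gang as the ceiling of num_regions over that budget, capped by the active workers, so small heaps do not spin up the whole gang. The arithmetic in isolation:

    #include <algorithm>

    const unsigned kRegionsPerThread = 384;  // RegionsPerThread from the task above

    unsigned rebuild_tracking_workers(unsigned num_regions, unsigned active_workers) {
      unsigned by_capacity = (num_regions + kRegionsPerThread - 1) / kRegionsPerThread;
      return std::min(active_workers, by_capacity);
    }
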
1201 1201 {
... 1201 lines elided ...
1202 1202 GCTraceTime(Debug, gc, phases) debug("Reclaim Empty Regions", _gc_timer_cm);
1203 1203 reclaim_empty_regions();
1204 1204 }
1205 1205
1206 1206 // Clean out dead classes
1207 1207 if (ClassUnloadingWithConcurrentMark) {
1208 1208 GCTraceTime(Debug, gc, phases) debug("Purge Metaspace", _gc_timer_cm);
1209 1209 ClassLoaderDataGraph::purge();
1210 1210 }
1211 1211
1212 - _g1h->resize_heap_if_necessary();
1213 -
1214 1212 compute_new_sizes();
1215 1213
1216 1214 verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark after");
1217 1215
1218 1216 assert(!restart_for_overflow(), "sanity");
1219 1217 // Completely reset the marking state since marking completed
1220 1218 reset_at_marking_complete();
1221 1219 } else {
1222 1220 // We overflowed. Restart concurrent marking.
1223 1221 _restart_for_overflow = true;
1224 1222
1225 1223 verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");
1226 1224
1227 1225 // Clear the marking state because we will be restarting
1228 1226 // marking due to overflowing the global mark stack.
1229 1227 reset_marking_for_restart();
1230 1228 }
1231 1229
1232 1230 {
1233 1231 GCTraceTime(Debug, gc, phases) debug("Report Object Count", _gc_timer_cm);
1234 1232 report_object_count(mark_finished);
1235 1233 }
1236 1234
1237 1235 // Statistics
1238 1236 double now = os::elapsedTime();
1239 1237 _remark_mark_times.add((mark_work_end - start) * 1000.0);
1240 1238 _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1241 1239 _remark_times.add((now - start) * 1000.0);
1242 1240
1243 1241 policy->record_concurrent_mark_remark_end();
1244 1242 }
1245 1243
1246 1244 class G1ReclaimEmptyRegionsTask : public AbstractGangTask {
1247 1245 // Per-region work during the Cleanup pause.
1248 1246 class G1ReclaimEmptyRegionsClosure : public HeapRegionClosure {
1249 1247 G1CollectedHeap* _g1h;
1250 1248 size_t _freed_bytes;
1251 1249 FreeRegionList* _local_cleanup_list;
1252 1250 uint _old_regions_removed;
1253 1251 uint _humongous_regions_removed;
1254 1252
1255 1253 public:
1256 1254 G1ReclaimEmptyRegionsClosure(G1CollectedHeap* g1h,
1257 1255 FreeRegionList* local_cleanup_list) :
1258 1256 _g1h(g1h),
1259 1257 _freed_bytes(0),
1260 1258 _local_cleanup_list(local_cleanup_list),
1261 1259 _old_regions_removed(0),
1262 1260 _humongous_regions_removed(0) { }
1263 1261
1264 1262 size_t freed_bytes() { return _freed_bytes; }
1265 1263 const uint old_regions_removed() { return _old_regions_removed; }
1266 1264 const uint humongous_regions_removed() { return _humongous_regions_removed; }
1267 1265
1268 1266 bool do_heap_region(HeapRegion *hr) {
1269 1267 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
1270 1268 _freed_bytes += hr->used();
1271 1269 hr->set_containing_set(NULL);
1272 1270 if (hr->is_humongous()) {
1273 1271 _humongous_regions_removed++;
1274 1272 _g1h->free_humongous_region(hr, _local_cleanup_list);
1275 1273 } else {
1276 1274 _old_regions_removed++;
1277 1275 _g1h->free_region(hr, _local_cleanup_list);
1278 1276 }
1279 1277 hr->clear_cardtable();
1280 1278 _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
1281 1279 log_trace(gc)("Reclaimed empty region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
1282 1280 }
1283 1281
1284 1282 return false;
1285 1283 }
1286 1284 };
1287 1285
1288 1286 G1CollectedHeap* _g1h;
1289 1287 FreeRegionList* _cleanup_list;
1290 1288 HeapRegionClaimer _hrclaimer;
1291 1289
1292 1290 public:
1293 1291 G1ReclaimEmptyRegionsTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
1294 1292 AbstractGangTask("G1 Cleanup"),
1295 1293 _g1h(g1h),
1296 1294 _cleanup_list(cleanup_list),
1297 1295 _hrclaimer(n_workers) {
1298 1296 }
1299 1297
1300 1298 void work(uint worker_id) {
1301 1299 FreeRegionList local_cleanup_list("Local Cleanup List");
1302 1300 G1ReclaimEmptyRegionsClosure cl(_g1h, &local_cleanup_list);
1303 1301 _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
1304 1302 assert(cl.is_complete(), "Shouldn't have aborted!");
1305 1303
1306 1304 // Now update the old/humongous region sets
1307 1305 _g1h->remove_from_old_sets(cl.old_regions_removed(), cl.humongous_regions_removed());
1308 1306 {
1309 1307 MutexLocker x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1310 1308 _g1h->decrement_summary_bytes(cl.freed_bytes());
1311 1309
1312 1310 _cleanup_list->add_ordered(&local_cleanup_list);
1313 1311 assert(local_cleanup_list.is_empty(), "post-condition");
1314 1312 }
1315 1313 }
1316 1314 };
1317 1315
1318 1316 void G1ConcurrentMark::reclaim_empty_regions() {
1319 1317 WorkGang* workers = _g1h->workers();
1320 1318 FreeRegionList empty_regions_list("Empty Regions After Mark List");
1321 1319
1322 1320 G1ReclaimEmptyRegionsTask cl(_g1h, &empty_regions_list, workers->active_workers());
1323 1321 workers->run_task(&cl);
1324 1322
1325 1323 if (!empty_regions_list.is_empty()) {
1326 1324 log_debug(gc)("Reclaimed %u empty regions", empty_regions_list.length());
1327 1325 // Now print the empty regions list.
1328 1326 G1HRPrinter* hrp = _g1h->hr_printer();
1329 1327 if (hrp->is_active()) {
1330 1328 FreeRegionListIterator iter(&empty_regions_list);
1331 1329 while (iter.more_available()) {
1332 1330 HeapRegion* hr = iter.get_next();
1333 1331 hrp->cleanup(hr);
1334 1332 }
1335 1333 }
1336 1334 // And actually make them available.
1337 1335 _g1h->prepend_to_freelist(&empty_regions_list);
1338 1336 }
1339 1337 }
1340 1338
1341 1339 void G1ConcurrentMark::compute_new_sizes() {
1342 1340 MetaspaceGC::compute_new_size();
1343 1341
1344 1342 // Cleanup will have freed any regions completely full of garbage.
1345 1343 // Update the soft reference policy with the new heap occupancy.
1346 1344 Universe::update_heap_info_at_gc();
1347 1345
1348 1346 // We reclaimed old regions so we should calculate the sizes to make
1349 1347 // sure we update the old gen/space data.
1350 1348 _g1h->g1mm()->update_sizes();
1351 1349 }
1352 1350
1353 1351 void G1ConcurrentMark::cleanup() {
1354 1352 assert_at_safepoint_on_vm_thread();
1355 1353
1356 1354 // If a full collection has happened, we shouldn't do this.
1357 1355 if (has_aborted()) {
1358 1356 return;
1359 1357 }
1360 1358
1361 1359 G1Policy* policy = _g1h->policy();
1362 1360 policy->record_concurrent_mark_cleanup_start();
1363 1361
1364 1362 double start = os::elapsedTime();
1365 1363
1366 1364 verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup before");
1367 1365
1368 1366 {
1369 1367 GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking After Rebuild", _gc_timer_cm);
1370 1368 G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
1371 1369 _g1h->heap_region_iterate(&cl);
1372 1370 }
1373 1371
... 150 lines elided ...
1374 1372 if (log_is_enabled(Trace, gc, liveness)) {
1375 1373 G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
1376 1374 _g1h->heap_region_iterate(&cl);
1377 1375 }
1378 1376
1379 1377 verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup after");
1380 1378
1381 1379   // We need to make this count as a "collection" so that any collection pause
1382 1380   // racing with it goes around and waits for Cleanup to finish.
1383 1381 _g1h->increment_total_collections();
1382 +
1383 + {
1384 + GCTraceTime(Debug, gc, phases) debug("Expand heap after concurrent mark", _gc_timer_cm);
1385 + _g1h->expand_heap_after_concurrent_mark();
1386 + }
1384 1387
1385 1388 // Local statistics
1386 1389 double recent_cleanup_time = (os::elapsedTime() - start);
1387 1390 _total_cleanup_time += recent_cleanup_time;
1388 1391 _cleanup_times.add(recent_cleanup_time);
1389 1392
1390 1393 {
1391 1394 GCTraceTime(Debug, gc, phases) debug("Finalize Concurrent Mark Cleanup", _gc_timer_cm);
1392 1395 policy->record_concurrent_mark_cleanup_end();
1393 1396 }
1394 1397 }
1395 1398
1396 1399 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
1397 1400 // Uses the G1CMTask associated with a worker thread (for serial reference
1398 1401 // processing the G1CMTask for worker 0 is used) to preserve (mark) and
1399 1402 // trace referent objects.
1400 1403 //
1401 1404 // Using the G1CMTask and embedded local queues avoids having the worker
1402 1405 // threads operate on the global mark stack. This reduces the risk
1403 1406 // of overflowing the stack - which we would rather avoid at this late
1404 1407 // stage. Also using the tasks' local queues removes the potential
1405 1408 // for the workers to interfere with each other, as they could if
1406 1409 // operating on the global stack.
1407 1410
1408 1411 class G1CMKeepAliveAndDrainClosure : public OopClosure {
1409 1412 G1ConcurrentMark* _cm;
1410 1413 G1CMTask* _task;
1411 1414 uint _ref_counter_limit;
1412 1415 uint _ref_counter;
1413 1416 bool _is_serial;
1414 1417 public:
1415 1418 G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
1416 1419 _cm(cm), _task(task), _ref_counter_limit(G1RefProcDrainInterval),
1417 1420 _ref_counter(_ref_counter_limit), _is_serial(is_serial) {
1418 1421 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
1419 1422 }
1420 1423
1421 1424 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
1422 1425 virtual void do_oop( oop* p) { do_oop_work(p); }
1423 1426
1424 1427 template <class T> void do_oop_work(T* p) {
1425 1428 if (_cm->has_overflown()) {
1426 1429 return;
1427 1430 }
1428 1431 if (!_task->deal_with_reference(p)) {
1429 1432 // We did not add anything to the mark bitmap (or mark stack), so there is
1430 1433 // no point trying to drain it.
1431 1434 return;
1432 1435 }
1433 1436 _ref_counter--;
1434 1437
1435 1438 if (_ref_counter == 0) {
1436 1439 // We have dealt with _ref_counter_limit references, pushing them
1437 1440 // and objects reachable from them on to the local stack (and
1438 1441 // possibly the global stack). Call G1CMTask::do_marking_step() to
1439 1442 // process these entries.
1440 1443 //
1441 1444 // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
1442 1445 // there's nothing more to do (i.e. we're done with the entries that
1443 1446 // were pushed as a result of the G1CMTask::deal_with_reference() calls
1444 1447 // above) or we overflow.
1445 1448 //
1446 1449 // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
1447 1450 // flag while there may still be some work to do. (See the comment at
1448 1451 // the beginning of G1CMTask::do_marking_step() for those conditions -
1449 1452 // one of which is reaching the specified time target.) It is only
1450 1453 // when G1CMTask::do_marking_step() returns without setting the
1451 1454 // has_aborted() flag that the marking step has completed.
1452 1455 do {
1453 1456 double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
1454 1457 _task->do_marking_step(mark_step_duration_ms,
1455 1458 false /* do_termination */,
1456 1459 _is_serial);
1457 1460 } while (_task->has_aborted() && !_cm->has_overflown());
1458 1461 _ref_counter = _ref_counter_limit;
1459 1462 }
1460 1463 }
1461 1464 };
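
// Illustrative note on the batching above: with G1RefProcDrainInterval == N,
// every N-th reference that actually marks something triggers a bounded
// do_marking_step() drain, so the task's local queue is emptied periodically
// instead of growing unchecked while references are processed.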
1462 1465
1463 1466 // 'Drain' oop closure used by both serial and parallel reference processing.
1464 1467 // Uses the G1CMTask associated with a given worker thread (for serial
1465 1468 // reference processing the G1CMTask for worker 0 is used). Calls the
1466 1469 // do_marking_step routine, with an unbelievably large timeout value,
1467 1470 // to drain the marking data structures of the remaining entries
1468 1471 // added by the 'keep alive' oop closure above.
1469 1472
1470 1473 class G1CMDrainMarkingStackClosure : public VoidClosure {
1471 1474 G1ConcurrentMark* _cm;
1472 1475 G1CMTask* _task;
1473 1476 bool _is_serial;
1474 1477 public:
1475 1478 G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
1476 1479 _cm(cm), _task(task), _is_serial(is_serial) {
1477 1480 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
1478 1481 }
1479 1482
1480 1483 void do_void() {
1481 1484 do {
1482 1485 // We call G1CMTask::do_marking_step() to completely drain the local
1483 1486 // and global marking stacks of entries pushed by the 'keep alive'
1484 1487 // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
1485 1488 //
1486 1489 // G1CMTask::do_marking_step() is called in a loop, which we'll exit
1487 1490 // if there's nothing more to do (i.e. we've completely drained the
1488 1491       // entries that were pushed as a result of applying the 'keep alive'
1489 1492 // closure to the entries on the discovered ref lists) or we overflow
1490 1493 // the global marking stack.
1491 1494 //
1492 1495 // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
1493 1496 // flag while there may still be some work to do. (See the comment at
1494 1497 // the beginning of G1CMTask::do_marking_step() for those conditions -
1495 1498 // one of which is reaching the specified time target.) It is only
1496 1499 // when G1CMTask::do_marking_step() returns without setting the
1497 1500 // has_aborted() flag that the marking step has completed.
1498 1501
1499 1502 _task->do_marking_step(1000000000.0 /* something very large */,
1500 1503 true /* do_termination */,
1501 1504 _is_serial);
1502 1505 } while (_task->has_aborted() && !_cm->has_overflown());
1503 1506 }
1504 1507 };
1505 1508
1506 1509 // Implementation of AbstractRefProcTaskExecutor for parallel
1507 1510 // reference processing at the end of G1 concurrent marking
1508 1511
1509 1512 class G1CMRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
1510 1513 private:
1511 1514 G1CollectedHeap* _g1h;
1512 1515 G1ConcurrentMark* _cm;
1513 1516 WorkGang* _workers;
1514 1517 uint _active_workers;
1515 1518
1516 1519 public:
1517 1520 G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
1518 1521 G1ConcurrentMark* cm,
1519 1522 WorkGang* workers,
1520 1523 uint n_workers) :
1521 1524 _g1h(g1h), _cm(cm),
1522 1525 _workers(workers), _active_workers(n_workers) { }
1523 1526
1524 1527 virtual void execute(ProcessTask& task, uint ergo_workers);
1525 1528 };
1526 1529
1527 1530 class G1CMRefProcTaskProxy : public AbstractGangTask {
1528 1531 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
1529 1532 ProcessTask& _proc_task;
1530 1533 G1CollectedHeap* _g1h;
1531 1534 G1ConcurrentMark* _cm;
1532 1535
1533 1536 public:
1534 1537 G1CMRefProcTaskProxy(ProcessTask& proc_task,
1535 1538 G1CollectedHeap* g1h,
1536 1539 G1ConcurrentMark* cm) :
1537 1540 AbstractGangTask("Process reference objects in parallel"),
1538 1541 _proc_task(proc_task), _g1h(g1h), _cm(cm) {
1539 1542 ReferenceProcessor* rp = _g1h->ref_processor_cm();
1540 1543 assert(rp->processing_is_mt(), "shouldn't be here otherwise");
1541 1544 }
1542 1545
1543 1546 virtual void work(uint worker_id) {
1544 1547 ResourceMark rm;
1545 1548 HandleMark hm;
1546 1549 G1CMTask* task = _cm->task(worker_id);
1547 1550 G1CMIsAliveClosure g1_is_alive(_g1h);
1548 1551 G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
1549 1552 G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
1550 1553
1551 1554 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
1552 1555 }
1553 1556 };
1554 1557
1555 1558 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task, uint ergo_workers) {
1556 1559 assert(_workers != NULL, "Need parallel worker threads.");
1557 1560 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
1558 1561 assert(_workers->active_workers() >= ergo_workers,
1559 1562 "Ergonomically chosen workers(%u) should be less than or equal to active workers(%u)",
1560 1563 ergo_workers, _workers->active_workers());
1561 1564
1562 1565 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
1563 1566
1564 1567 // We need to reset the concurrency level before each
1565 1568 // proxy task execution, so that the termination protocol
1566 1569 // and overflow handling in G1CMTask::do_marking_step() knows
1567 1570 // how many workers to wait for.
1568 1571 _cm->set_concurrency(ergo_workers);
1569 1572 _workers->run_task(&proc_task_proxy, ergo_workers);
1570 1573 }
1571 1574
1572 1575 void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) {
1573 1576 ResourceMark rm;
1574 1577 HandleMark hm;
1575 1578
1576 1579 // Is alive closure.
1577 1580 G1CMIsAliveClosure g1_is_alive(_g1h);
1578 1581
1579 1582 // Inner scope to exclude the cleaning of the string table
1580 1583 // from the displayed time.
1581 1584 {
1582 1585 GCTraceTime(Debug, gc, phases) debug("Reference Processing", _gc_timer_cm);
1583 1586
1584 1587 ReferenceProcessor* rp = _g1h->ref_processor_cm();
1585 1588
1586 1589 // See the comment in G1CollectedHeap::ref_processing_init()
1587 1590 // about how reference processing currently works in G1.
1588 1591
1589 1592 // Set the soft reference policy
1590 1593 rp->setup_policy(clear_all_soft_refs);
1591 1594 assert(_global_mark_stack.is_empty(), "mark stack should be empty");
1592 1595
1593 1596 // Instances of the 'Keep Alive' and 'Complete GC' closures used
1594 1597 // in serial reference processing. Note these closures are also
1595 1598     // used for serially processing (by the current thread) the
1596 1599 // JNI references during parallel reference processing.
1597 1600 //
1598 1601 // These closures do not need to synchronize with the worker
1599 1602 // threads involved in parallel reference processing as these
1600 1603     // instances are executed serially by the current thread (i.e.
1601 1604 // reference processing is not multi-threaded and is thus
1602 1605 // performed by the current thread instead of a gang worker).
1603 1606 //
1604 1607 // The gang tasks involved in parallel reference processing create
1605 1608 // their own instances of these closures, which do their own
1606 1609 // synchronization among themselves.
1607 1610 G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
1608 1611 G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
1609 1612
1610 1613 // We need at least one active thread. If reference processing
1611 1614 // is not multi-threaded we use the current (VMThread) thread,
1612 1615 // otherwise we use the work gang from the G1CollectedHeap and
1613 1616 // we utilize all the worker threads we can.
1614 1617 bool processing_is_mt = rp->processing_is_mt();
1615 1618 uint active_workers = (processing_is_mt ? _g1h->workers()->active_workers() : 1U);
1616 1619 active_workers = clamp(active_workers, 1u, _max_num_tasks);
1617 1620
1618 1621 // Parallel processing task executor.
1619 1622 G1CMRefProcTaskExecutor par_task_executor(_g1h, this,
1620 1623 _g1h->workers(), active_workers);
1621 1624 AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
1622 1625
1623 1626 // Set the concurrency level. The phase was already set prior to
1624 1627 // executing the remark task.
1625 1628 set_concurrency(active_workers);
1626 1629
1627 1630 // Set the degree of MT processing here. If the discovery was done MT,
1628 1631 // the number of threads involved during discovery could differ from
1629 1632 // the number of active workers. This is OK as long as the discovered
1630 1633 // Reference lists are balanced (see balance_all_queues() and balance_queues()).
1631 1634 rp->set_active_mt_degree(active_workers);
1632 1635
1633 1636 ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->max_num_queues());
1634 1637
1635 1638 // Process the weak references.
1636 1639 const ReferenceProcessorStats& stats =
1637 1640 rp->process_discovered_references(&g1_is_alive,
1638 1641 &g1_keep_alive,
1639 1642 &g1_drain_mark_stack,
1640 1643 executor,
1641 1644 &pt);
1642 1645 _gc_tracer_cm->report_gc_reference_stats(stats);
1643 1646 pt.print_all_references();
1644 1647
1645 1648 // The do_oop work routines of the keep_alive and drain_marking_stack
1646 1649 // oop closures will set the has_overflown flag if we overflow the
1647 1650 // global marking stack.
1648 1651
1649 1652 assert(has_overflown() || _global_mark_stack.is_empty(),
1650 1653 "Mark stack should be empty (unless it has overflown)");
1651 1654
1652 1655 assert(rp->num_queues() == active_workers, "why not");
1653 1656
1654 1657 rp->verify_no_references_recorded();
1655 1658 assert(!rp->discovery_enabled(), "Post condition");
1656 1659 }
1657 1660
1658 1661 if (has_overflown()) {
1659 1662     // We cannot trust g1_is_alive and the contents of the heap if the marking stack
1660 1663 // overflowed while processing references. Exit the VM.
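    // (For example, a larger limit can be requested on the command line via
    // -XX:MarkStackSizeMax=<new value>.)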
1661 1664 fatal("Overflow during reference processing, can not continue. Please "
1662 1665 "increase MarkStackSizeMax (current value: " SIZE_FORMAT ") and "
1663 1666 "restart.", MarkStackSizeMax);
1664 1667 return;
1665 1668 }
1666 1669
1667 1670 assert(_global_mark_stack.is_empty(), "Marking should have completed");
1668 1671
1669 1672 {
1670 1673 GCTraceTime(Debug, gc, phases) debug("Weak Processing", _gc_timer_cm);
1671 1674 WeakProcessor::weak_oops_do(_g1h->workers(), &g1_is_alive, &do_nothing_cl, 1);
1672 1675 }
1673 1676
1674 1677   // Unload Klasses, Strings, Code Cache, etc.
1675 1678 if (ClassUnloadingWithConcurrentMark) {
1676 1679 GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
1677 1680 bool purged_classes = SystemDictionary::do_unloading(_gc_timer_cm);
1678 1681 _g1h->complete_cleaning(&g1_is_alive, purged_classes);
1679 1682 } else if (StringDedup::is_enabled()) {
1680 1683 GCTraceTime(Debug, gc, phases) debug("String Deduplication", _gc_timer_cm);
1681 1684 _g1h->string_dedup_cleaning(&g1_is_alive, NULL);
1682 1685 }
1683 1686 }
1684 1687
1685 1688 class G1PrecleanYieldClosure : public YieldClosure {
1686 1689 G1ConcurrentMark* _cm;
1687 1690
1688 1691 public:
1689 1692 G1PrecleanYieldClosure(G1ConcurrentMark* cm) : _cm(cm) { }
1690 1693
1691 1694 virtual bool should_return() {
1692 1695 return _cm->has_aborted();
1693 1696 }
1694 1697
1695 1698 virtual bool should_return_fine_grain() {
1696 1699 _cm->do_yield_check();
1697 1700 return _cm->has_aborted();
1698 1701 }
1699 1702 };
1700 1703
1701 1704 void G1ConcurrentMark::preclean() {
1702 1705 assert(G1UseReferencePrecleaning, "Precleaning must be enabled.");
1703 1706
1704 1707 SuspendibleThreadSetJoiner joiner;
1705 1708
1706 1709 G1CMKeepAliveAndDrainClosure keep_alive(this, task(0), true /* is_serial */);
1707 1710 G1CMDrainMarkingStackClosure drain_mark_stack(this, task(0), true /* is_serial */);
1708 1711
1709 1712 set_concurrency_and_phase(1, true);
1710 1713
1711 1714 G1PrecleanYieldClosure yield_cl(this);
1712 1715
1713 1716 ReferenceProcessor* rp = _g1h->ref_processor_cm();
1714 1717 // Precleaning is single threaded. Temporarily disable MT discovery.
1715 1718 ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
1716 1719 rp->preclean_discovered_references(rp->is_alive_non_header(),
1717 1720 &keep_alive,
1718 1721 &drain_mark_stack,
1719 1722 &yield_cl,
1720 1723 _gc_timer_cm);
1721 1724 }
1722 1725
1723 1726 // When sampling object counts, we already swapped the mark bitmaps, so we need to use
1724 1727 // the prev bitmap when determining liveness.
1725 1728 class G1ObjectCountIsAliveClosure: public BoolObjectClosure {
1726 1729 G1CollectedHeap* _g1h;
1727 1730 public:
1728 1731 G1ObjectCountIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) { }
1729 1732
1730 1733 bool do_object_b(oop obj) {
1731 1734 HeapWord* addr = (HeapWord*)obj;
1732 1735 return addr != NULL &&
1733 1736 (!_g1h->is_in_g1_reserved(addr) || !_g1h->is_obj_dead(obj));
1734 1737 }
1735 1738 };
1736 1739
1737 1740 void G1ConcurrentMark::report_object_count(bool mark_completed) {
1738 1741   // Depending on the completion of the marking, liveness needs to be determined
1739 1742   // using either the next or prev bitmap.
1740 1743 if (mark_completed) {
1741 1744 G1ObjectCountIsAliveClosure is_alive(_g1h);
1742 1745 _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1743 1746 } else {
1744 1747 G1CMIsAliveClosure is_alive(_g1h);
1745 1748 _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1746 1749 }
1747 1750 }
1748 1751
1749 1752
1750 1753 void G1ConcurrentMark::swap_mark_bitmaps() {
1751 1754 G1CMBitMap* temp = _prev_mark_bitmap;
1752 1755 _prev_mark_bitmap = _next_mark_bitmap;
1753 1756 _next_mark_bitmap = temp;
1754 1757 _g1h->collector_state()->set_clearing_next_bitmap(true);
1755 1758 }
1756 1759
1757 1760 // Closure for marking entries in SATB buffers.
1758 1761 class G1CMSATBBufferClosure : public SATBBufferClosure {
1759 1762 private:
1760 1763 G1CMTask* _task;
1761 1764 G1CollectedHeap* _g1h;
1762 1765
1763 1766 // This is very similar to G1CMTask::deal_with_reference, but with
1764 1767 // more relaxed requirements for the argument, so this must be more
1765 1768 // circumspect about treating the argument as an object.
1766 1769 void do_entry(void* entry) const {
1767 1770 _task->increment_refs_reached();
1768 1771 oop const obj = static_cast<oop>(entry);
1769 1772 _task->make_reference_grey(obj);
1770 1773 }
1771 1774
1772 1775 public:
1773 1776 G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h)
1774 1777 : _task(task), _g1h(g1h) { }
1775 1778
1776 1779 virtual void do_buffer(void** buffer, size_t size) {
1777 1780 for (size_t i = 0; i < size; ++i) {
1778 1781 do_entry(buffer[i]);
1779 1782 }
1780 1783 }
1781 1784 };
1782 1785
1783 1786 class G1RemarkThreadsClosure : public ThreadClosure {
1784 1787 G1CMSATBBufferClosure _cm_satb_cl;
1785 1788 G1CMOopClosure _cm_cl;
1786 1789 MarkingCodeBlobClosure _code_cl;
1787 1790 uintx _claim_token;
1788 1791
1789 1792 public:
1790 1793 G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
1791 1794 _cm_satb_cl(task, g1h),
1792 1795 _cm_cl(g1h, task),
1793 1796 _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
1794 1797 _claim_token(Threads::thread_claim_token()) {}
1795 1798
1796 1799 void do_thread(Thread* thread) {
1797 1800 if (thread->claim_threads_do(true, _claim_token)) {
1798 1801 SATBMarkQueue& queue = G1ThreadLocalData::satb_mark_queue(thread);
1799 1802 queue.apply_closure_and_empty(&_cm_satb_cl);
1800 1803 if (thread->is_Java_thread()) {
1801 1804         // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
1802 1805         // however, oops reachable from nmethods have very complex lifecycles:
1803 1806         // * Alive if on the stack of an executing method
1804 1807         // * Weakly reachable otherwise
1805 1808         // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
1806 1809         // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
1807 1810 JavaThread* jt = (JavaThread*)thread;
1808 1811 jt->nmethods_do(&_code_cl);
1809 1812 }
1810 1813 }
1811 1814 }
1812 1815 };
1813 1816
1814 1817 class G1CMRemarkTask : public AbstractGangTask {
1815 1818 G1ConcurrentMark* _cm;
1816 1819 public:
1817 1820 void work(uint worker_id) {
1818 1821 G1CMTask* task = _cm->task(worker_id);
1819 1822 task->record_start_time();
1820 1823 {
1821 1824 ResourceMark rm;
1822 1825 HandleMark hm;
1823 1826
1824 1827 G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
1825 1828 Threads::threads_do(&threads_f);
1826 1829 }
1827 1830
1828 1831 do {
1829 1832 task->do_marking_step(1000000000.0 /* something very large */,
1830 1833 true /* do_termination */,
1831 1834 false /* is_serial */);
1832 1835 } while (task->has_aborted() && !_cm->has_overflown());
1833 1836 // If we overflow, then we do not want to restart. We instead
1834 1837 // want to abort remark and do concurrent marking again.
1835 1838 task->record_end_time();
1836 1839 }
1837 1840
1838 1841 G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :
1839 1842 AbstractGangTask("Par Remark"), _cm(cm) {
1840 1843 _cm->terminator()->reset_for_reuse(active_workers);
1841 1844 }
1842 1845 };
1843 1846
1844 1847 void G1ConcurrentMark::finalize_marking() {
1845 1848 ResourceMark rm;
1846 1849 HandleMark hm;
1847 1850
1848 1851 _g1h->ensure_parsability(false);
1849 1852
1850 1853   // This is remark, so we'll use up all active threads.
1851 1854 uint active_workers = _g1h->workers()->active_workers();
1852 1855 set_concurrency_and_phase(active_workers, false /* concurrent */);
1853 1856   // Leave _parallel_marking_threads at its
1854 1857 // value originally calculated in the G1ConcurrentMark
1855 1858 // constructor and pass values of the active workers
1856 1859 // through the gang in the task.
1857 1860
1858 1861 {
1859 1862 StrongRootsScope srs(active_workers);
1860 1863
1861 1864 G1CMRemarkTask remarkTask(this, active_workers);
1862 1865 // We will start all available threads, even if we decide that the
1863 1866 // active_workers will be fewer. The extra ones will just bail out
1864 1867 // immediately.
1865 1868 _g1h->workers()->run_task(&remarkTask);
1866 1869 }
1867 1870
1868 1871 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
1869 1872 guarantee(has_overflown() ||
1870 1873 satb_mq_set.completed_buffers_num() == 0,
1871 1874 "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
1872 1875 BOOL_TO_STR(has_overflown()),
1873 1876 satb_mq_set.completed_buffers_num());
1874 1877
1875 1878 print_stats();
1876 1879 }
1877 1880
1878 1881 void G1ConcurrentMark::flush_all_task_caches() {
1879 1882 size_t hits = 0;
1880 1883 size_t misses = 0;
1881 1884 for (uint i = 0; i < _max_num_tasks; i++) {
1882 1885 Pair<size_t, size_t> stats = _tasks[i]->flush_mark_stats_cache();
1883 1886 hits += stats.first;
1884 1887 misses += stats.second;
1885 1888 }
1886 1889 size_t sum = hits + misses;
1887 1890 log_debug(gc, stats)("Mark stats cache hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %1.3lf",
1888 1891 hits, misses, percent_of(hits, sum));
1889 1892 }
1890 1893
1891 1894 void G1ConcurrentMark::clear_range_in_prev_bitmap(MemRegion mr) {
1892 1895 _prev_mark_bitmap->clear_range(mr);
1893 1896 }
1894 1897
1895 1898 HeapRegion*
1896 1899 G1ConcurrentMark::claim_region(uint worker_id) {
1897 1900 // "checkpoint" the finger
1898 1901 HeapWord* finger = _finger;
1899 1902
1900 1903 while (finger < _heap.end()) {
1901 1904 assert(_g1h->is_in_g1_reserved(finger), "invariant");
1902 1905
1903 1906 HeapRegion* curr_region = _g1h->heap_region_containing(finger);
1904 1907 // Make sure that the reads below do not float before loading curr_region.
1905 1908 OrderAccess::loadload();
1906 1909     // Above heap_region_containing may return NULL as we always claim
1907 1910 // until the end of the heap. In this case, just jump to the next region.
1908 1911 HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
1909 1912
1910 1913 // Is the gap between reading the finger and doing the CAS too long?
1911 1914 HeapWord* res = Atomic::cmpxchg(&_finger, finger, end);
1912 1915 if (res == finger && curr_region != NULL) {
1913 1916 // we succeeded
1914 1917 HeapWord* bottom = curr_region->bottom();
1915 1918 HeapWord* limit = curr_region->next_top_at_mark_start();
1916 1919
1917 1920       // notice that _finger == end cannot be guaranteed here since
1918 1921 // someone else might have moved the finger even further
1919 1922 assert(_finger >= end, "the finger should have moved forward");
1920 1923
1921 1924 if (limit > bottom) {
1922 1925 return curr_region;
1923 1926 } else {
1924 1927 assert(limit == bottom,
1925 1928 "the region limit should be at bottom");
1926 1929 // we return NULL and the caller should try calling
1927 1930 // claim_region() again.
1928 1931 return NULL;
1929 1932 }
1930 1933 } else {
1931 1934 assert(_finger > finger, "the finger should have moved forward");
1932 1935 // read it again
1933 1936 finger = _finger;
1934 1937 }
1935 1938 }
1936 1939
1937 1940 return NULL;
1938 1941 }
1939 1942
1940 1943 #ifndef PRODUCT
1941 1944 class VerifyNoCSetOops {
1942 1945 G1CollectedHeap* _g1h;
1943 1946 const char* _phase;
1944 1947 int _info;
1945 1948
1946 1949 public:
1947 1950 VerifyNoCSetOops(const char* phase, int info = -1) :
1948 1951 _g1h(G1CollectedHeap::heap()),
1949 1952 _phase(phase),
1950 1953 _info(info)
1951 1954 { }
1952 1955
1953 1956 void operator()(G1TaskQueueEntry task_entry) const {
1954 1957 if (task_entry.is_array_slice()) {
1955 1958 guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice()));
1956 1959 return;
1957 1960 }
1958 1961 guarantee(oopDesc::is_oop(task_entry.obj()),
1959 1962 "Non-oop " PTR_FORMAT ", phase: %s, info: %d",
1960 1963 p2i(task_entry.obj()), _phase, _info);
1961 1964 HeapRegion* r = _g1h->heap_region_containing(task_entry.obj());
1962 1965 guarantee(!(r->in_collection_set() || r->has_index_in_opt_cset()),
1963 1966 "obj " PTR_FORMAT " from %s (%d) in region %u in (optional) collection set",
1964 1967 p2i(task_entry.obj()), _phase, _info, r->hrm_index());
1965 1968 }
1966 1969 };
1967 1970
1968 1971 void G1ConcurrentMark::verify_no_collection_set_oops() {
1969 1972 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
1970 1973 if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
1971 1974 return;
1972 1975 }
1973 1976
1974 1977 // Verify entries on the global mark stack
1975 1978 _global_mark_stack.iterate(VerifyNoCSetOops("Stack"));
1976 1979
1977 1980 // Verify entries on the task queues
1978 1981 for (uint i = 0; i < _max_num_tasks; ++i) {
1979 1982 G1CMTaskQueue* queue = _task_queues->queue(i);
1980 1983 queue->iterate(VerifyNoCSetOops("Queue", i));
1981 1984 }
1982 1985
1983 1986 // Verify the global finger
1984 1987 HeapWord* global_finger = finger();
1985 1988 if (global_finger != NULL && global_finger < _heap.end()) {
1986 1989 // Since we always iterate over all regions, we might get a NULL HeapRegion
1987 1990 // here.
1988 1991 HeapRegion* global_hr = _g1h->heap_region_containing(global_finger);
1989 1992 guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
1990 1993 "global finger: " PTR_FORMAT " region: " HR_FORMAT,
1991 1994 p2i(global_finger), HR_FORMAT_PARAMS(global_hr));
1992 1995 }
1993 1996
1994 1997 // Verify the task fingers
1995 1998 assert(_num_concurrent_workers <= _max_num_tasks, "sanity");
1996 1999 for (uint i = 0; i < _num_concurrent_workers; ++i) {
1997 2000 G1CMTask* task = _tasks[i];
1998 2001 HeapWord* task_finger = task->finger();
1999 2002 if (task_finger != NULL && task_finger < _heap.end()) {
2000 2003 // See above note on the global finger verification.
2001 2004 HeapRegion* r = _g1h->heap_region_containing(task_finger);
2002 2005 guarantee(r == NULL || task_finger == r->bottom() ||
2003 2006 !r->in_collection_set() || !r->has_index_in_opt_cset(),
2004 2007 "task finger: " PTR_FORMAT " region: " HR_FORMAT,
2005 2008 p2i(task_finger), HR_FORMAT_PARAMS(r));
2006 2009 }
2007 2010 }
2008 2011 }
2009 2012 #endif // PRODUCT
2010 2013
2011 2014 void G1ConcurrentMark::rebuild_rem_set_concurrently() {
2012 2015 _g1h->rem_set()->rebuild_rem_set(this, _concurrent_workers, _worker_id_offset);
2013 2016 }
2014 2017
2015 2018 void G1ConcurrentMark::print_stats() {
2016 2019 if (!log_is_enabled(Debug, gc, stats)) {
2017 2020 return;
2018 2021 }
2019 2022 log_debug(gc, stats)("---------------------------------------------------------------------");
2020 2023 for (size_t i = 0; i < _num_active_tasks; ++i) {
2021 2024 _tasks[i]->print_stats();
2022 2025 log_debug(gc, stats)("---------------------------------------------------------------------");
2023 2026 }
2024 2027 }
2025 2028
2026 2029 void G1ConcurrentMark::concurrent_cycle_abort() {
2027 2030 if (!cm_thread()->during_cycle() || _has_aborted) {
2028 2031 // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
2029 2032 return;
2030 2033 }
2031 2034
2032 2035 // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
2033 2036 // concurrent bitmap clearing.
2034 2037 {
2035 2038 GCTraceTime(Debug, gc) debug("Clear Next Bitmap");
2036 2039 clear_bitmap(_next_mark_bitmap, _g1h->workers(), false);
2037 2040 }
2038 2041 // Note we cannot clear the previous marking bitmap here
2039 2042 // since VerifyDuringGC verifies the objects marked during
2040 2043 // a full GC against the previous bitmap.
2041 2044
2042 2045 // Empty mark stack
2043 2046 reset_marking_for_restart();
2044 2047 for (uint i = 0; i < _max_num_tasks; ++i) {
2045 2048 _tasks[i]->clear_region_fields();
2046 2049 }
2047 2050 _first_overflow_barrier_sync.abort();
2048 2051 _second_overflow_barrier_sync.abort();
2049 2052 _has_aborted = true;
2050 2053
2051 2054 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2052 2055 satb_mq_set.abandon_partial_marking();
2053 2056   // This can be called either during or outside marking; we'll read
2054 2057 // the expected_active value from the SATB queue set.
2055 2058 satb_mq_set.set_active_all_threads(
2056 2059 false, /* new active value */
2057 2060 satb_mq_set.is_active() /* expected_active */);
2058 2061 }
2059 2062
2060 2063 static void print_ms_time_info(const char* prefix, const char* name,
2061 2064 NumberSeq& ns) {
2062 2065 log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
2063 2066 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
2064 2067 if (ns.num() > 0) {
2065 2068 log_trace(gc, marking)("%s [std. dev = %8.2f ms, max = %8.2f ms]",
2066 2069 prefix, ns.sd(), ns.maximum());
2067 2070 }
2068 2071 }
2069 2072
2070 2073 void G1ConcurrentMark::print_summary_info() {
2071 2074 Log(gc, marking) log;
2072 2075 if (!log.is_trace()) {
2073 2076 return;
2074 2077 }
2075 2078
2076 2079 log.trace(" Concurrent marking:");
2077 2080 print_ms_time_info(" ", "init marks", _init_times);
2078 2081 print_ms_time_info(" ", "remarks", _remark_times);
2079 2082 {
2080 2083 print_ms_time_info(" ", "final marks", _remark_mark_times);
2081 2084 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times);
2082 2085
2083 2086 }
2084 2087 print_ms_time_info(" ", "cleanups", _cleanup_times);
2085 2088 log.trace(" Finalize live data total time = %8.2f s (avg = %8.2f ms).",
2086 2089 _total_cleanup_time, (_cleanup_times.num() > 0 ? _total_cleanup_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
2087 2090 log.trace(" Total stop_world time = %8.2f s.",
2088 2091 (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0);
2089 2092 log.trace(" Total concurrent time = %8.2f s (%8.2f s marking).",
2090 2093 cm_thread()->vtime_accum(), cm_thread()->vtime_mark_accum());
2091 2094 }
2092 2095
2093 2096 void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const {
2094 2097 _concurrent_workers->print_worker_threads_on(st);
2095 2098 }
2096 2099
2097 2100 void G1ConcurrentMark::threads_do(ThreadClosure* tc) const {
2098 2101 _concurrent_workers->threads_do(tc);
2099 2102 }
2100 2103
2101 2104 void G1ConcurrentMark::print_on_error(outputStream* st) const {
2102 2105 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
2103 2106 p2i(_prev_mark_bitmap), p2i(_next_mark_bitmap));
2104 2107 _prev_mark_bitmap->print_on_error(st, " Prev Bits: ");
2105 2108 _next_mark_bitmap->print_on_error(st, " Next Bits: ");
2106 2109 }
2107 2110
2108 2111 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
2109 2112 ReferenceProcessor* result = g1h->ref_processor_cm();
2110 2113 assert(result != NULL, "CM reference processor should not be NULL");
2111 2114 return result;
2112 2115 }
2113 2116
2114 2117 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
2115 2118 G1CMTask* task)
2116 2119 : MetadataVisitingOopIterateClosure(get_cm_oop_closure_ref_processor(g1h)),
2117 2120 _g1h(g1h), _task(task)
2118 2121 { }
2119 2122
2120 2123 void G1CMTask::setup_for_region(HeapRegion* hr) {
2121 2124 assert(hr != NULL,
2122 2125 "claim_region() should have filtered out NULL regions");
2123 2126 _curr_region = hr;
2124 2127 _finger = hr->bottom();
2125 2128 update_region_limit();
2126 2129 }
2127 2130
2128 2131 void G1CMTask::update_region_limit() {
2129 2132 HeapRegion* hr = _curr_region;
2130 2133 HeapWord* bottom = hr->bottom();
2131 2134 HeapWord* limit = hr->next_top_at_mark_start();
2132 2135
2133 2136 if (limit == bottom) {
2134 2137 // The region was collected underneath our feet.
2135 2138 // We set the finger to bottom to ensure that the bitmap
2136 2139 // iteration that will follow this will not do anything.
2137 2140 // (this is not a condition that holds when we set the region up,
2138 2141 // as the region is not supposed to be empty in the first place)
2139 2142 _finger = bottom;
2140 2143 } else if (limit >= _region_limit) {
2141 2144 assert(limit >= _finger, "peace of mind");
2142 2145 } else {
2143 2146 assert(limit < _region_limit, "only way to get here");
2144 2147 // This can happen under some pretty unusual circumstances. An
2145 2148 // evacuation pause empties the region underneath our feet (NTAMS
2146 2149 // at bottom). We then do some allocation in the region (NTAMS
2147 2150 // stays at bottom), followed by the region being used as a GC
2148 2151 // alloc region (NTAMS will move to top() and the objects
2149 2152 // originally below it will be grayed). All objects now marked in
2150 2153 // the region are explicitly grayed, if below the global finger,
2151 2154     // and in fact we do not need to scan anything else. So, we simply
2152 2155 // set _finger to be limit to ensure that the bitmap iteration
2153 2156 // doesn't do anything.
2154 2157 _finger = limit;
2155 2158 }
2156 2159
2157 2160 _region_limit = limit;
2158 2161 }
2159 2162
2160 2163 void G1CMTask::giveup_current_region() {
2161 2164 assert(_curr_region != NULL, "invariant");
2162 2165 clear_region_fields();
2163 2166 }
2164 2167
2165 2168 void G1CMTask::clear_region_fields() {
2166 2169 // Values for these three fields that indicate that we're not
2167 2170 // holding on to a region.
2168 2171 _curr_region = NULL;
2169 2172 _finger = NULL;
2170 2173 _region_limit = NULL;
2171 2174 }
2172 2175
2173 2176 void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
2174 2177 if (cm_oop_closure == NULL) {
2175 2178 assert(_cm_oop_closure != NULL, "invariant");
2176 2179 } else {
2177 2180 assert(_cm_oop_closure == NULL, "invariant");
2178 2181 }
2179 2182 _cm_oop_closure = cm_oop_closure;
2180 2183 }
2181 2184
2182 2185 void G1CMTask::reset(G1CMBitMap* next_mark_bitmap) {
2183 2186 guarantee(next_mark_bitmap != NULL, "invariant");
2184 2187 _next_mark_bitmap = next_mark_bitmap;
2185 2188 clear_region_fields();
2186 2189
2187 2190 _calls = 0;
2188 2191 _elapsed_time_ms = 0.0;
2189 2192 _termination_time_ms = 0.0;
2190 2193 _termination_start_time_ms = 0.0;
2191 2194
2192 2195 _mark_stats_cache.reset();
2193 2196 }
2194 2197
2195 2198 bool G1CMTask::should_exit_termination() {
2196 2199 if (!regular_clock_call()) {
2197 2200 return true;
2198 2201 }
2199 2202
2200 2203 // This is called when we are in the termination protocol. We should
2201 2204 // quit if, for some reason, this task wants to abort or the global
2202 2205 // stack is not empty (this means that we can get work from it).
2203 2206 return !_cm->mark_stack_empty() || has_aborted();
2204 2207 }
2205 2208
2206 2209 void G1CMTask::reached_limit() {
2207 2210 assert(_words_scanned >= _words_scanned_limit ||
2208 2211          _refs_reached >= _refs_reached_limit,
2209 2212 "shouldn't have been called otherwise");
2210 2213 abort_marking_if_regular_check_fail();
2211 2214 }
2212 2215
2213 2216 bool G1CMTask::regular_clock_call() {
2214 2217 if (has_aborted()) {
2215 2218 return false;
2216 2219 }
2217 2220
2218 2221 // First, we need to recalculate the words scanned and refs reached
2219 2222 // limits for the next clock call.
2220 2223 recalculate_limits();
2221 2224
2222 2225   // During the regular clock call we do the following:
2223 2226
2224 2227 // (1) If an overflow has been flagged, then we abort.
2225 2228 if (_cm->has_overflown()) {
2226 2229 return false;
2227 2230 }
2228 2231
2229 2232 // If we are not concurrent (i.e. we're doing remark) we don't need
2230 2233 // to check anything else. The other steps are only needed during
2231 2234 // the concurrent marking phase.
2232 2235 if (!_cm->concurrent()) {
2233 2236 return true;
2234 2237 }
2235 2238
2236 2239 // (2) If marking has been aborted for Full GC, then we also abort.
2237 2240 if (_cm->has_aborted()) {
2238 2241 return false;
2239 2242 }
2240 2243
2241 2244 double curr_time_ms = os::elapsedVTime() * 1000.0;
2242 2245
2243 2246   // (3) We check whether we should yield. If we have to, then we abort.
2244 2247 if (SuspendibleThreadSet::should_yield()) {
2245 2248 // We should yield. To do this we abort the task. The caller is
2246 2249 // responsible for yielding.
2247 2250 return false;
2248 2251 }
2249 2252
2250 2253   // (4) We check whether we've reached our time quota. If we have,
2251 2254 // then we abort.
2252 2255 double elapsed_time_ms = curr_time_ms - _start_time_ms;
2253 2256 if (elapsed_time_ms > _time_target_ms) {
2254 2257 _has_timed_out = true;
2255 2258 return false;
2256 2259 }
2257 2260
2258 2261   // (5) Finally, we check whether there are enough completed SATB
2259 2262 // buffers available for processing. If there are, we abort.
2260 2263 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2261 2264 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
2262 2265     // We do need to process SATB buffers, so we'll abort and restart
2263 2266     // the marking task to do so.
2264 2267 return false;
2265 2268 }
2266 2269 return true;
2267 2270 }
2268 2271
2269 2272 void G1CMTask::recalculate_limits() {
2270 2273 _real_words_scanned_limit = _words_scanned + words_scanned_period;
2271 2274 _words_scanned_limit = _real_words_scanned_limit;
2272 2275
2273 2276 _real_refs_reached_limit = _refs_reached + refs_reached_period;
2274 2277 _refs_reached_limit = _real_refs_reached_limit;
2275 2278 }
2276 2279
2277 2280 void G1CMTask::decrease_limits() {
2278 2281 // This is called when we believe that we're going to do an infrequent
2279 2282 // operation which will increase the per byte scanned cost (i.e. move
2280 2283 // entries to/from the global stack). It basically tries to decrease the
2281 2284 // scanning limit so that the clock is called earlier.
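  // For example, with a words period of P, the limit computed by
  // recalculate_limits() (the words count at that point plus P) drops by
  // 3P/4, leaving roughly P/4 words of scanning before the next clock
  // call. The same applies to the refs limit.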
2282 2285
2283 2286 _words_scanned_limit = _real_words_scanned_limit - 3 * words_scanned_period / 4;
2284 2287 _refs_reached_limit = _real_refs_reached_limit - 3 * refs_reached_period / 4;
2285 2288 }
2286 2289
2287 2290 void G1CMTask::move_entries_to_global_stack() {
2288 2291 // Local array where we'll store the entries that will be popped
2289 2292 // from the local queue.
2290 2293 G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2291 2294
2292 2295 size_t n = 0;
2293 2296 G1TaskQueueEntry task_entry;
2294 2297 while (n < G1CMMarkStack::EntriesPerChunk && _task_queue->pop_local(task_entry)) {
2295 2298 buffer[n] = task_entry;
2296 2299 ++n;
2297 2300 }
2298 2301 if (n < G1CMMarkStack::EntriesPerChunk) {
2299 2302 buffer[n] = G1TaskQueueEntry();
2300 2303 }
2301 2304
2302 2305 if (n > 0) {
2303 2306 if (!_cm->mark_stack_push(buffer)) {
2304 2307 set_has_aborted();
2305 2308 }
2306 2309 }
2307 2310
2308 2311 // This operation was quite expensive, so decrease the limits.
2309 2312 decrease_limits();
2310 2313 }
2311 2314
2312 2315 bool G1CMTask::get_entries_from_global_stack() {
2313 2316 // Local array where we'll store the entries that will be popped
2314 2317 // from the global stack.
2315 2318 G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2316 2319
2317 2320 if (!_cm->mark_stack_pop(buffer)) {
2318 2321 return false;
2319 2322 }
2320 2323
2321 2324 // We did actually pop at least one entry.
2322 2325 for (size_t i = 0; i < G1CMMarkStack::EntriesPerChunk; ++i) {
2323 2326 G1TaskQueueEntry task_entry = buffer[i];
2324 2327 if (task_entry.is_null()) {
2325 2328 break;
2326 2329 }
2327 2330 assert(task_entry.is_array_slice() || oopDesc::is_oop(task_entry.obj()), "Element " PTR_FORMAT " must be an array slice or oop", p2i(task_entry.obj()));
2328 2331 bool success = _task_queue->push(task_entry);
2329 2332 // We only call this when the local queue is empty or under a
2330 2333 // given target limit. So, we do not expect this push to fail.
2331 2334 assert(success, "invariant");
2332 2335 }
2333 2336
2334 2337 // This operation was quite expensive, so decrease the limits
2335 2338 decrease_limits();
2336 2339 return true;
2337 2340 }
2338 2341
2339 2342 void G1CMTask::drain_local_queue(bool partially) {
2340 2343 if (has_aborted()) {
2341 2344 return;
2342 2345 }
2343 2346
2344 2347 // Decide what the target size is, depending whether we're going to
2345 2348 // drain it partially (so that other tasks can steal if they run out
2346 2349 // of things to do) or totally (at the very end).
2347 2350 size_t target_size;
2348 2351 if (partially) {
2349 2352 target_size = MIN2((size_t)_task_queue->max_elems()/3, (size_t)GCDrainStackTargetSize);
2350 2353 } else {
2351 2354 target_size = 0;
2352 2355 }
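  // For example, assuming the default GCDrainStackTargetSize of 64 and a task
  // queue whose capacity is well above 3 * 64 entries, a partial drain stops
  // once roughly 64 entries are left for other tasks to steal.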
2353 2356
2354 2357 if (_task_queue->size() > target_size) {
2355 2358 G1TaskQueueEntry entry;
2356 2359 bool ret = _task_queue->pop_local(entry);
2357 2360 while (ret) {
2358 2361 scan_task_entry(entry);
2359 2362 if (_task_queue->size() <= target_size || has_aborted()) {
2360 2363 ret = false;
2361 2364 } else {
2362 2365 ret = _task_queue->pop_local(entry);
2363 2366 }
2364 2367 }
2365 2368 }
2366 2369 }
2367 2370
2368 2371 void G1CMTask::drain_global_stack(bool partially) {
2369 2372 if (has_aborted()) {
2370 2373 return;
2371 2374 }
2372 2375
2373 2376 // We have a policy to drain the local queue before we attempt to
2374 2377 // drain the global stack.
2375 2378 assert(partially || _task_queue->size() == 0, "invariant");
2376 2379
2377 2380 // Decide what the target size is, depending whether we're going to
2378 2381 // drain it partially (so that other tasks can steal if they run out
2379 2382 // of things to do) or totally (at the very end).
2380 2383   // Notice that when draining the global mark stack partially, due to the raciness
2381 2384   // of the mark stack size update we might in fact drop below the target. But
2382 2385 // this is not a problem.
2383 2386 // In case of total draining, we simply process until the global mark stack is
2384 2387 // totally empty, disregarding the size counter.
2385 2388 if (partially) {
2386 2389 size_t const target_size = _cm->partial_mark_stack_size_target();
2387 2390 while (!has_aborted() && _cm->mark_stack_size() > target_size) {
2388 2391 if (get_entries_from_global_stack()) {
2389 2392 drain_local_queue(partially);
2390 2393 }
2391 2394 }
2392 2395 } else {
2393 2396 while (!has_aborted() && get_entries_from_global_stack()) {
2394 2397 drain_local_queue(partially);
2395 2398 }
2396 2399 }
2397 2400 }
2398 2401
2399 2402 // The SATB queue has several assumptions on whether to call the par or
2400 2403 // non-par versions of the methods. This is why some of the code is
2401 2404 // replicated. We should really get rid of the single-threaded version
2402 2405 // of the code to simplify things.
2403 2406 void G1CMTask::drain_satb_buffers() {
2404 2407 if (has_aborted()) {
2405 2408 return;
2406 2409 }
2407 2410
2408 2411 // We set this so that the regular clock knows that we're in the
2409 2412 // middle of draining buffers and doesn't set the abort flag when it
2410 2413 // notices that SATB buffers are available for draining. It'd be
2411 2414   // very counterproductive if it did that. :-)
2412 2415 _draining_satb_buffers = true;
2413 2416
2414 2417 G1CMSATBBufferClosure satb_cl(this, _g1h);
2415 2418 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2416 2419
2417 2420 // This keeps claiming and applying the closure to completed buffers
2418 2421 // until we run out of buffers or we need to abort.
2419 2422 while (!has_aborted() &&
2420 2423 satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
2421 2424 abort_marking_if_regular_check_fail();
2422 2425 }
2423 2426
2424 2427 // Can't assert qset is empty here, even if not aborted. If concurrent,
2425 2428 // some other thread might be adding to the queue. If not concurrent,
2426 2429 // some other thread might have won the race for the last buffer, but
2427 2430 // has not yet decremented the count.
2428 2431
2429 2432 _draining_satb_buffers = false;
2430 2433
2431 2434   // Again, this was a potentially expensive operation; decrease the
2432 2435   // limits to get the regular clock call early.
2433 2436 decrease_limits();
2434 2437 }
2435 2438
2436 2439 void G1CMTask::clear_mark_stats_cache(uint region_idx) {
2437 2440 _mark_stats_cache.reset(region_idx);
2438 2441 }
2439 2442
2440 2443 Pair<size_t, size_t> G1CMTask::flush_mark_stats_cache() {
2441 2444 return _mark_stats_cache.evict_all();
2442 2445 }
2443 2446
2444 2447 void G1CMTask::print_stats() {
2445 2448 log_debug(gc, stats)("Marking Stats, task = %u, calls = %u", _worker_id, _calls);
2446 2449 log_debug(gc, stats)(" Elapsed time = %1.2lfms, Termination time = %1.2lfms",
2447 2450 _elapsed_time_ms, _termination_time_ms);
2448 2451 log_debug(gc, stats)(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms max = %1.2lfms, total = %1.2lfms",
2449 2452 _step_times_ms.num(),
2450 2453 _step_times_ms.avg(),
2451 2454 _step_times_ms.sd(),
2452 2455 _step_times_ms.maximum(),
2453 2456 _step_times_ms.sum());
2454 2457 size_t const hits = _mark_stats_cache.hits();
2455 2458 size_t const misses = _mark_stats_cache.misses();
2456 2459 log_debug(gc, stats)(" Mark Stats Cache: hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %.3f",
2457 2460 hits, misses, percent_of(hits, hits + misses));
2458 2461 }
2459 2462
2460 2463 bool G1ConcurrentMark::try_stealing(uint worker_id, G1TaskQueueEntry& task_entry) {
2461 2464 return _task_queues->steal(worker_id, task_entry);
2462 2465 }
2463 2466
2464 2467 /*****************************************************************************
2465 2468
2466 2469 The do_marking_step(time_target_ms, ...) method is the building
2467 2470 block of the parallel marking framework. It can be called in parallel
2468 2471 with other invocations of do_marking_step() on different tasks
2469 2472 (but only one per task, obviously) and concurrently with the
2470 2473 mutator threads, or during remark, hence it eliminates the need
2471 2474 for two versions of the code. When called during remark, it will
2472 2475 pick up from where the task left off during the concurrent marking
2473 2476 phase. Interestingly, tasks are also claimable during evacuation
2474 2477       pauses, since do_marking_step() ensures that it aborts before
2475 2478 it needs to yield.
2476 2479
2477 2480 The data structures that it uses to do marking work are the
2478 2481 following:
2479 2482
2480 2483 (1) Marking Bitmap. If there are gray objects that appear only
2481 2484 on the bitmap (this happens either when dealing with an overflow
2482 2485 or when the initial marking phase has simply marked the roots
2483 2486 and didn't push them on the stack), then tasks claim heap
2484 2487 regions whose bitmap they then scan to find gray objects. A
2485 2488 global finger indicates where the end of the last claimed region
2486 2489 is. A local finger indicates how far into the region a task has
2487 2490 scanned. The two fingers are used to determine how to gray an
2488 2491 object (i.e. whether simply marking it is OK, as it will be
2489 2492 visited by a task in the future, or whether it needs to be also
2490 2493 pushed on a stack).
2491 2494
2492 2495 (2) Local Queue. The local queue of the task which is accessed
2493 2496 reasonably efficiently by the task. Other tasks can steal from
2494 2497 it when they run out of work. Throughout the marking phase, a
2495 2498 task attempts to keep its local queue short but not totally
2496 2499 empty, so that entries are available for stealing by other
2497 2500 tasks. Only when there is no more work, a task will totally
2498 2501       tasks. Only when there is no more work does a task totally
2499 2502
2500 2503 (3) Global Mark Stack. This handles local queue overflow. During
2501 2504 marking only sets of entries are moved between it and the local
2502 2505       queues, as access to it requires a mutex and more fine-grained
2503 2506       interaction with it, which might cause contention. If it
2504 2507 overflows, then the marking phase should restart and iterate
2505 2508 over the bitmap to identify gray objects. Throughout the marking
2506 2509 phase, tasks attempt to keep the global mark stack at a small
2507 2510 length but not totally empty, so that entries are available for
2508 2511       popping by other tasks. Only when there is no more work do tasks
2509 2512       totally drain the global mark stack.
2510 2513
2511 2514 (4) SATB Buffer Queue. This is where completed SATB buffers are
2512 2515 made available. Buffers are regularly removed from this queue
2513 2516 and scanned for roots, so that the queue doesn't get too
2514 2517 long. During remark, all completed buffers are processed, as
2515 2518 well as the filled in parts of any uncompleted buffers.
2516 2519
2517 2520 The do_marking_step() method tries to abort when the time target
2518 2521 has been reached. There are a few other cases when the
2519 2522 do_marking_step() method also aborts:
2520 2523
2521 2524 (1) When the marking phase has been aborted (after a Full GC).
2522 2525
2523 2526 (2) When a global overflow (on the global stack) has been
2524 2527 triggered. Before the task aborts, it will actually sync up with
2525 2528 the other tasks to ensure that all the marking data structures
2526 2529 (local queues, stacks, fingers etc.) are re-initialized so that
2527 2530 when do_marking_step() completes, the marking phase can
2528 2531 immediately restart.
2529 2532
2530 2533 (3) When enough completed SATB buffers are available. The
2531 2534 do_marking_step() method only tries to drain SATB buffers right
2532 2535 at the beginning. So, if enough buffers are available, the
2533 2536 marking step aborts and the SATB buffers are processed at
2534 2537 the beginning of the next invocation.
2535 2538
2536 2539       (4) To yield. When we have to yield, we abort and yield
2537 2540       right at the end of do_marking_step(). This saves us from a lot
2538 2541       of hassle as, by yielding, we might allow a Full GC. If this
2539 2542 happens then objects will be compacted underneath our feet, the
2540 2543 heap might shrink, etc. We save checking for this by just
2541 2544 aborting and doing the yield right at the end.
2542 2545
2543 2546 From the above it follows that the do_marking_step() method should
2544 2547 be called in a loop (or, otherwise, regularly) until it completes.
2545 2548
2546 2549 If a marking step completes without its has_aborted() flag being
2547 2550 true, it means it has completed the current marking phase (and
2548 2551 also all other marking tasks have done so and have all synced up).
2549 2552
2550 2553 A method called regular_clock_call() is invoked "regularly" (in
2551 2554       sub-ms intervals) throughout marking. It is this clock method that
2552 2555 checks all the abort conditions which were mentioned above and
2553 2556 decides when the task should abort. A work-based scheme is used to
2554 2557 trigger this clock method: when the number of object words the
2555 2558 marking phase has scanned or the number of references the marking
2556 2559       phase has visited reach a given limit. Additional invocations of
2557 2560       the clock method have been planted in a few other strategic places
2558 2561 too. The initial reason for the clock method was to avoid calling
2559 2562 vtime too regularly, as it is quite expensive. So, once it was in
2560 2563 place, it was natural to piggy-back all the other conditions on it
2561 2564 too and not constantly check them throughout the code.
2562 2565
2563 2566 If do_termination is true then do_marking_step will enter its
2564 2567 termination protocol.
2565 2568
2566 2569 The value of is_serial must be true when do_marking_step is being
2567 2570 called serially (i.e. by the VMThread) and do_marking_step should
2568 2571 skip any synchronization in the termination and overflow code.
2569 2572 Examples include the serial remark code and the serial reference
2570 2573 processing closures.
2571 2574
2572 2575 The value of is_serial must be false when do_marking_step is
2573 2576 being called by any of the worker threads in a work gang.
2574 2577 Examples include the concurrent marking code (CMMarkingTask),
2575 2578 the MT remark code, and the MT reference processing closures.
2576 2579
2577 2580 *****************************************************************************/
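
// A minimal driver sketch (mirroring G1CMRemarkTask::work() above; task and
// cm stand for the relevant G1CMTask* and G1ConcurrentMark*):
//
//   do {
//     task->do_marking_step(target_ms,
//                           true  /* do_termination */,
//                           false /* is_serial */);
//   } while (task->has_aborted() && !cm->has_overflown());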
2578 2581
2579 2582 void G1CMTask::do_marking_step(double time_target_ms,
2580 2583 bool do_termination,
2581 2584 bool is_serial) {
2582 2585 assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
2583 2586
2584 2587 _start_time_ms = os::elapsedVTime() * 1000.0;
2585 2588
2586 2589 // If do_stealing is true then do_marking_step will attempt to
2587 2590 // steal work from the other G1CMTasks. It only makes sense to
2588 2591 // enable stealing when the termination protocol is enabled
2589 2592 // and do_marking_step() is not being called serially.
2590 2593 bool do_stealing = do_termination && !is_serial;
2591 2594
2592 2595 G1Predictions const& predictor = _g1h->policy()->predictor();
2593 2596 double diff_prediction_ms = predictor.predict_zero_bounded(&_marking_step_diff_ms);
2594 2597 _time_target_ms = time_target_ms - diff_prediction_ms;
2595 2598
2596 2599 // set up the variables that are used in the work-based scheme to
2597 2600 // call the regular clock method
2598 2601 _words_scanned = 0;
2599 2602 _refs_reached = 0;
2600 2603 recalculate_limits();
2601 2604
2602 2605 // clear all flags
2603 2606 clear_has_aborted();
2604 2607 _has_timed_out = false;
2605 2608 _draining_satb_buffers = false;
2606 2609
2607 2610 ++_calls;
2608 2611
2609 2612 // Set up the bitmap and oop closures. Anything that uses them is
2610 2613   // eventually called from this method, so it is OK to allocate them
2611 2614   // locally, on the stack.
2612 2615 G1CMBitMapClosure bitmap_closure(this, _cm);
2613 2616 G1CMOopClosure cm_oop_closure(_g1h, this);
2614 2617 set_cm_oop_closure(&cm_oop_closure);
2615 2618
2616 2619 if (_cm->has_overflown()) {
2617 2620 // This can happen if the mark stack overflows during a GC pause
2618 2621 // and this task, after a yield point, restarts. We have to abort
2619 2622 // as we need to get into the overflow protocol which happens
2620 2623 // right at the end of this task.
2621 2624 set_has_aborted();
2622 2625 }
2623 2626
2624 2627 // First drain any available SATB buffers. After this, we will not
2625 2628 // look at SATB buffers before the next invocation of this method.
2626 2629 // If enough completed SATB buffers are queued up, the regular clock
2627 2630 // will abort this task so that it restarts.
2628 2631 drain_satb_buffers();
2629 2632 // ...then partially drain the local queue and the global stack
2630 2633 drain_local_queue(true);
2631 2634 drain_global_stack(true);
2632 2635
2633 2636 do {
2634 2637 if (!has_aborted() && _curr_region != NULL) {
2635 2638 // This means that we're already holding on to a region.
2636 2639 assert(_finger != NULL, "if region is not NULL, then the finger "
2637 2640 "should not be NULL either");
2638 2641
2639 2642 // We might have restarted this task after an evacuation pause
2640 2643 // which might have evacuated the region we're holding on to
2641 2644 // underneath our feet. Let's read its limit again to make sure
2642 2645 // that we do not iterate over a region of the heap that
2643 2646 // contains garbage (update_region_limit() will also move
2644 2647 // _finger to the start of the region if it is found empty).
2645 2648 update_region_limit();
2646 2649 // We will start from _finger not from the start of the region,
2647 2650 // as we might be restarting this task after aborting half-way
2648 2651 // through scanning this region. In this case, _finger points to
2649 2652 // the address where we last found a marked object. If this is a
2650 2653 // fresh region, _finger points to start().
2651 2654 MemRegion mr = MemRegion(_finger, _region_limit);
2652 2655
2653 2656 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
2654 2657 "humongous regions should go around loop once only");
2655 2658
2656 2659 // Some special cases:
2657 2660 // If the memory region is empty, we can just give up the region.
2658 2661 // If the current region is humongous then we only need to check
2659 2662 // the bitmap for the bit associated with the start of the object,
2660 2663 // scan the object if it's live, and give up the region.
2661 2664 // Otherwise, let's iterate over the bitmap of the part of the region
2662 2665 // that is left.
2663 2666 // If the iteration is successful, give up the region.
2664 2667 if (mr.is_empty()) {
2665 2668 giveup_current_region();
2666 2669 abort_marking_if_regular_check_fail();
2667 2670 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
2668 2671 if (_next_mark_bitmap->is_marked(mr.start())) {
2669 2672 // The object is marked - apply the closure
2670 2673 bitmap_closure.do_addr(mr.start());
2671 2674 }
2672 2675 // Even if this task aborted while scanning the humongous object
2673 2676 // we can (and should) give up the current region.
2674 2677 giveup_current_region();
2675 2678 abort_marking_if_regular_check_fail();
2676 2679 } else if (_next_mark_bitmap->iterate(&bitmap_closure, mr)) {
2677 2680 giveup_current_region();
2678 2681 abort_marking_if_regular_check_fail();
2679 2682 } else {
2680 2683 assert(has_aborted(), "currently the only way to do so");
2681 2684 // The only way to abort the bitmap iteration is to return
2682 2685 // false from the do_addr() method. However, inside the
2683 2686 // do_addr() method we move the _finger to point to the
2684 2687 // object currently being looked at. So, if we bail out, we
2685 2688 // have definitely set _finger to something non-null.
2686 2689 assert(_finger != NULL, "invariant");
2687 2690
2688 2691 // Region iteration was actually aborted. So now _finger
2689 2692 // points to the address of the object we last scanned. If we
2690 2693 // leave it there, when we restart this task, we will rescan
2691 2694 // the object. It is easy to avoid this. We move the finger by
2692 2695 // enough to point to the next possible object header.
2693 2696 assert(_finger < _region_limit, "invariant");
2694 2697 HeapWord* const new_finger = _finger + ((oop)_finger)->size();
2695 2698 // Check if bitmap iteration was aborted while scanning the last object
2696 2699 if (new_finger >= _region_limit) {
2697 2700 giveup_current_region();
2698 2701 } else {
2699 2702 move_finger_to(new_finger);
2700 2703 }
2701 2704 }
2702 2705 }
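
The finger bump above avoids rescanning the last object after a restart: the new finger is the old one plus the object's size in words, and reaching the region limit means the region is finished. A pointer-arithmetic sketch of that step, with HeapWord modeled as a plain machine word:

    #include <cstddef>
    #include <cstdint>

    typedef uintptr_t WordModel;

    // Advance past the object just scanned so a restarted task resumes
    // at the next possible object header instead of rescanning it.
    WordModel* advance_finger(WordModel* finger, size_t obj_size_in_words,
                              WordModel* region_limit,
                              bool* give_up_region) {
      WordModel* new_finger = finger + obj_size_in_words;
      // If the last object reached the region limit, the region is done.
      *give_up_region = (new_finger >= region_limit);
      return new_finger;
    }
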
2703 2706 // At this point we have either completed iterating over the
2704 2707 // region we were holding on to, or we have aborted.
2705 2708
2706 2709 // We then partially drain the local queue and the global stack.
2707 2710 // (Do we really need this?)
2708 2711 drain_local_queue(true);
2709 2712 drain_global_stack(true);
2710 2713
2711 2714 // Read the note on the claim_region() method on why it might
2712 2715 // return NULL with potentially more regions available for
2713 2716 // claiming and why we have to check out_of_regions() to determine
2714 2717 // whether we're done or not.
2715 2718 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
2716 2719 // We are going to try to claim a new region. We should have
2717 2720 // given up on the previous one.
2718 2721 // Separated the asserts so that we know which one fires.
2719 2722 assert(_curr_region == NULL, "invariant");
2720 2723 assert(_finger == NULL, "invariant");
2721 2724 assert(_region_limit == NULL, "invariant");
2722 2725 HeapRegion* claimed_region = _cm->claim_region(_worker_id);
2723 2726 if (claimed_region != NULL) {
2724 2727 // Yes, we managed to claim one
2725 2728 setup_for_region(claimed_region);
2726 2729 assert(_curr_region == claimed_region, "invariant");
2727 2730 }
2728 2731 // It is important to call the regular clock here. It might take
2729 2732 // a while to claim a region if, for example, we hit a large
2730 2733 // block of empty regions. So we need to call the regular clock
2731 2734 // method once per loop iteration to make sure it's called
2732 2735 // frequently enough.
2733 2736 abort_marking_if_regular_check_fail();
2734 2737 }
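
claim_region() can fail transiently (for instance, a lost race on the global finger) while regions are still available, which is why the loop distinguishes "claim failed" from "out of regions". A CAS-based model of that behavior, with the region cursor simplified to an atomic index:

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    // Toy region claimer: workers advance a shared cursor with a CAS.
    std::atomic<size_t> g_next_region{0};

    const size_t NO_REGION = SIZE_MAX;

    size_t claim_region_model(size_t num_regions) {
      size_t cur = g_next_region.load();
      if (cur >= num_regions) {
        return NO_REGION;  // genuinely out of regions
      }
      if (g_next_region.compare_exchange_strong(cur, cur + 1)) {
        return cur;        // claimed region `cur`
      }
      return NO_REGION;    // lost the race; regions may remain, so retry
    }
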
2735 2738
2736 2739 if (!has_aborted() && _curr_region == NULL) {
2737 2740 assert(_cm->out_of_regions(),
2738 2741 "at this point we should be out of regions");
2739 2742 }
2740 2743 } while (_curr_region != NULL && !has_aborted());
2741 2744
2742 2745 if (!has_aborted()) {
2743 2746 // We cannot check whether the global stack is empty, since other
2744 2747 // tasks might be pushing objects to it concurrently.
2745 2748 assert(_cm->out_of_regions(),
2746 2749 "at this point we should be out of regions");
2747 2750 // Try to reduce the number of available SATB buffers so that
2748 2751 // remark has less work to do.
2749 2752 drain_satb_buffers();
2750 2753 }
2751 2754
2752 2755 // Since we've done everything else, we can now totally drain the
2753 2756 // local queue and global stack.
2754 2757 drain_local_queue(false);
2755 2758 drain_global_stack(false);
2756 2759
2757 2760 // Attempt at work stealing from other tasks' queues.
2758 2761 if (do_stealing && !has_aborted()) {
2759 2762 // We have not aborted. This means that we have finished all that
2760 2763 // we could. Let's try to do some stealing...
2761 2764
2762 2765 // We cannot check whether the global stack is empty, since other
2763 2766 // tasks might be pushing objects to it concurrently.
2764 2767 assert(_cm->out_of_regions() && _task_queue->size() == 0,
2765 2768 "only way to reach here");
2766 2769 while (!has_aborted()) {
2767 2770 G1TaskQueueEntry entry;
2768 2771 if (_cm->try_stealing(_worker_id, entry)) {
2769 2772 scan_task_entry(entry);
2770 2773
2771 2774 // And since we're towards the end, let's totally drain the
2772 2775 // local queue and global stack.
2773 2776 drain_local_queue(false);
2774 2777 drain_global_stack(false);
2775 2778 } else {
2776 2779 break;
2777 2780 }
2778 2781 }
2779 2782 }
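
Each successful steal is followed by a full drain because a stolen entry can fan out into more local work. A simplified model of try_stealing over plain deques; the real task queues are lock-free, and the random-victim policy and attempt bound here are assumptions:

    #include <cstdlib>
    #include <deque>
    #include <vector>

    // Toy work stealing: pick random victims and take one entry from a
    // non-empty peer queue, giving up after a bounded number of tries.
    template <typename Entry>
    bool try_stealing_model(std::vector<std::deque<Entry> >& queues,
                            size_t self, Entry& out) {
      for (size_t attempt = 0; attempt < 2 * queues.size(); attempt++) {
        size_t victim = (size_t)std::rand() % queues.size();
        if (victim == self || queues[victim].empty()) {
          continue;
        }
        out = queues[victim].back();  // steal from the victim's tail
        queues[victim].pop_back();
        return true;
      }
      return false;
    }
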
2780 2783
2781 2784 // We still haven't aborted. Now, let's try to get into the
2782 2785 // termination protocol.
2783 2786 if (do_termination && !has_aborted()) {
2784 2787 // We cannot check whether the global stack is empty, since other
2785 2788 // tasks might be concurrently pushing objects on it.
2786 2789 // Separated the asserts so that we know which one fires.
2787 2790 assert(_cm->out_of_regions(), "only way to reach here");
2788 2791 assert(_task_queue->size() == 0, "only way to reach here");
2789 2792 _termination_start_time_ms = os::elapsedVTime() * 1000.0;
2790 2793
2791 2794 // The G1CMTask class also extends the TerminatorTerminator class,
2792 2795 // hence its should_exit_termination() method will also decide
2793 2796 // whether to exit the termination protocol or not.
2794 2797 bool finished = (is_serial ||
2795 2798 _cm->terminator()->offer_termination(this));
2796 2799 double termination_end_time_ms = os::elapsedVTime() * 1000.0;
2797 2800 _termination_time_ms +=
2798 2801 termination_end_time_ms - _termination_start_time_ms;
2799 2802
2800 2803 if (finished) {
2801 2804 // We're all done.
2802 2805
2803 2806 // We can now guarantee that the global stack is empty, since
2804 2807 // all other tasks have finished. We separated the guarantees so
2805 2808 // that, if a condition is false, we can immediately find out
2806 2809 // which one.
2807 2810 guarantee(_cm->out_of_regions(), "only way to reach here");
2808 2811 guarantee(_cm->mark_stack_empty(), "only way to reach here");
2809 2812 guarantee(_task_queue->size() == 0, "only way to reach here");
2810 2813 guarantee(!_cm->has_overflown(), "only way to reach here");
2811 2814 guarantee(!has_aborted(), "should never happen if termination has completed");
2812 2815 } else {
2813 2816 // Apparently there's more work to do. Let's abort this task; the
2814 2817 // caller will restart it and we can hopefully find more things to do.
2815 2818 set_has_aborted();
2816 2819 }
2817 2820 }
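
offer_termination() only succeeds once every worker has offered; should_exit_termination() is the per-task veto that pulls a worker back out when work reappears (e.g. an overflow was raised). A counting model of that handshake follows; the real TaskTerminator additionally spins, yields, and handles spurious wakeups:

    #include <atomic>

    // Toy termination protocol: offer, wait for everyone, or withdraw.
    struct ToyTerminator {
      std::atomic<int> offers{0};
      const int n_workers;
      explicit ToyTerminator(int n) : n_workers(n), offers(0) {}

      template <typename ShouldExitFn>
      bool offer_termination(ShouldExitFn should_exit) {
        offers.fetch_add(1);
        while (offers.load() < n_workers) {
          if (!should_exit()) {   // new work appeared: withdraw offer
            offers.fetch_sub(1);
            return false;         // caller aborts and restarts the step
          }
        }
        return true;              // all workers offered: marking is done
      }
    };
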
2818 2821
2819 2822 // Mainly for debugging purposes, to make sure that a pointer to the
2820 2823 // closure, which was allocated on this frame's stack, doesn't
2821 2824 // accidentally escape it.
2822 2825 set_cm_oop_closure(NULL);
2823 2826 double end_time_ms = os::elapsedVTime() * 1000.0;
2824 2827 double elapsed_time_ms = end_time_ms - _start_time_ms;
2825 2828 // Update the step history.
2826 2829 _step_times_ms.add(elapsed_time_ms);
2827 2830
2828 2831 if (has_aborted()) {
2829 2832 // The task was aborted for some reason.
2830 2833 if (_has_timed_out) {
2831 2834 double diff_ms = elapsed_time_ms - _time_target_ms;
2832 2835 // Keep statistics of how well we did with respect to hitting
2833 2836 // our target only if we actually timed out (if we aborted for
2834 2837 // other reasons, then the results might get skewed).
2835 2838 _marking_step_diff_ms.add(diff_ms);
2836 2839 }
2837 2840
2838 2841 if (_cm->has_overflown()) {
2839 2842 // This is the interesting one. We aborted because a global
2840 2843 // overflow was raised. This means we have to restart the
2841 2844 // marking phase and start iterating over regions. However, in
2842 2845 // order to do this we have to make sure that all tasks stop
2843 2846 // what they are doing and re-initialize in a safe manner. We
2844 2847 // will achieve this with the use of two barrier sync points.
2845 2848
2846 2849 if (!is_serial) {
2847 2850 // We only need to enter the sync barrier if being called
2848 2851 // from a parallel context
2849 2852 _cm->enter_first_sync_barrier(_worker_id);
2850 2853
2851 2854 // When we exit this sync barrier we know that all tasks have
2852 2855 // stopped doing marking work. So, it's now safe to
2853 2856 // re-initialize our data structures.
2854 2857 }
2855 2858
2856 2859 clear_region_fields();
2857 2860 flush_mark_stats_cache();
2858 2861
2859 2862 if (!is_serial) {
2860 2863 // If we're executing the concurrent phase of marking, reset the marking
2861 2864 // state; otherwise the marking state is reset after reference processing,
2862 2865 // during the remark pause.
2863 2866 // If we reset here as a result of an overflow during the remark we will
2864 2867 // see assertion failures from any subsequent set_concurrency_and_phase()
2865 2868 // calls.
2866 2869 if (_cm->concurrent() && _worker_id == 0) {
2867 2870 // Worker 0 is responsible for clearing the global data structures because
2868 2871 // of an overflow. During STW we should not clear the overflow flag (in
2869 2872 // G1ConcurrentMark::reset_marking_for_restart()) since we rely on it being
2870 2873 // true when we exit this method to abort the pause and restart concurrent marking.
2871 2874 _cm->reset_marking_for_restart();
2872 2875
2873 2876 log_info(gc, marking)("Concurrent Mark reset for overflow");
2874 2877 }
2875 2878
2876 2879 // ...and enter the second barrier.
2877 2880 _cm->enter_second_sync_barrier(_worker_id);
2878 2881 }
2879 2882 // At this point, if we're during the concurrent phase of
2880 2883 // marking, everything has been re-initialized and we're
2881 2884 // ready to restart.
2882 2885 }
2883 2886 }
2884 2887 }
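
Putting the parameters together: stealing follows from do_termination && !is_serial, and the time target selects between incremental concurrent steps and a run-to-completion remark step. Hedged examples of the two call shapes (argument values are assumptions, not quoted from the actual callers):

    // Concurrent phase, one of several parallel worker tasks:
    //   task->do_marking_step(10.0 /* target ms */,
    //                         true /* do_termination */,
    //                         false /* is_serial */);
    //
    // Remark pause, with an effectively unbounded time budget:
    //   task->do_marking_step(1000000000.0,
    //                         true /* do_termination */,
    //                         is_serial);
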
2885 2888
2886 2889 G1CMTask::G1CMTask(uint worker_id,
2887 2890 G1ConcurrentMark* cm,
2888 2891 G1CMTaskQueue* task_queue,
2889 2892 G1RegionMarkStats* mark_stats,
2890 2893 uint max_regions) :
2891 2894 _objArray_processor(this),
2892 2895 _worker_id(worker_id),
2893 2896 _g1h(G1CollectedHeap::heap()),
2894 2897 _cm(cm),
2895 2898 _next_mark_bitmap(NULL),
2896 2899 _task_queue(task_queue),
2897 2900 _mark_stats_cache(mark_stats, max_regions, RegionMarkStatsCacheSize),
2898 2901 _calls(0),
2899 2902 _time_target_ms(0.0),
2900 2903 _start_time_ms(0.0),
2901 2904 _cm_oop_closure(NULL),
2902 2905 _curr_region(NULL),
2903 2906 _finger(NULL),
2904 2907 _region_limit(NULL),
2905 2908 _words_scanned(0),
2906 2909 _words_scanned_limit(0),
2907 2910 _real_words_scanned_limit(0),
2908 2911 _refs_reached(0),
2909 2912 _refs_reached_limit(0),
2910 2913 _real_refs_reached_limit(0),
2911 2914 _has_aborted(false),
2912 2915 _has_timed_out(false),
2913 2916 _draining_satb_buffers(false),
2914 2917 _step_times_ms(),
2915 2918 _elapsed_time_ms(0.0),
2916 2919 _termination_time_ms(0.0),
2917 2920 _termination_start_time_ms(0.0),
2918 2921 _marking_step_diff_ms()
2919 2922 {
2920 2923 guarantee(task_queue != NULL, "invariant");
2921 2924
2922 2925 _marking_step_diff_ms.add(0.5);
2923 2926 }
2924 2927
2925 2928 // These are formatting macros that are used below to ensure
2926 2929 // consistent formatting. The *_H_* versions are used to format the
2927 2930 // header for a particular value and they should be kept consistent
2928 2931 // with the corresponding macro. Also note that most of the macros add
2929 2932 // the necessary white space (as a prefix) which makes them a bit
2930 2933 // easier to compose.
2931 2934
2932 2935 // All the output lines are prefixed with this string to be able to
2933 2936 // identify them easily in a large log file.
2934 2937 #define G1PPRL_LINE_PREFIX "###"
2935 2938
2936 2939 #define G1PPRL_ADDR_BASE_FORMAT " " PTR_FORMAT "-" PTR_FORMAT
2937 2940 #ifdef _LP64
2938 2941 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s"
2939 2942 #else // _LP64
2940 2943 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s"
2941 2944 #endif // _LP64
2942 2945
2943 2946 // For per-region info
2944 2947 #define G1PPRL_TYPE_FORMAT " %-4s"
2945 2948 #define G1PPRL_TYPE_H_FORMAT " %4s"
2946 2949 #define G1PPRL_STATE_FORMAT " %-5s"
2947 2950 #define G1PPRL_STATE_H_FORMAT " %5s"
2948 2951 #define G1PPRL_BYTE_FORMAT " " SIZE_FORMAT_W(9)
2949 2952 #define G1PPRL_BYTE_H_FORMAT " %9s"
2950 2953 #define G1PPRL_DOUBLE_FORMAT " %14.1f"
2951 2954 #define G1PPRL_DOUBLE_H_FORMAT " %14s"
2952 2955
2953 2956 // For summary info
2954 2957 #define G1PPRL_SUM_ADDR_FORMAT(tag) " " tag ":" G1PPRL_ADDR_BASE_FORMAT
2955 2958 #define G1PPRL_SUM_BYTE_FORMAT(tag) " " tag ": " SIZE_FORMAT
2956 2959 #define G1PPRL_SUM_MB_FORMAT(tag) " " tag ": %1.2f MB"
2957 2960 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"
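
These macros compose via C's adjacent string-literal concatenation: each column macro contributes its leading space and conversion, and chaining them yields a single format string per output row. A self-contained toy with simplified column formats (the macro names below are illustrative, not the ones above):

    #include <cstddef>
    #include <cstdio>

    // Per-column fragments concatenate into one literal at compile time.
    #define PPRL_PREFIX  "###"
    #define PPRL_TYPE    " %-4s"
    #define PPRL_BYTES   " %9zu"

    int main() {
      // Expands to the single literal "### %-4s %9zu\n".
      std::printf(PPRL_PREFIX PPRL_TYPE PPRL_BYTES "\n",
                  "Eden", (size_t)1048576);
      return 0;
    }
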
2958 2961
2959 2962 G1PrintRegionLivenessInfoClosure::G1PrintRegionLivenessInfoClosure(const char* phase_name) :
2960 2963 _total_used_bytes(0), _total_capacity_bytes(0),
2961 2964 _total_prev_live_bytes(0), _total_next_live_bytes(0),
2962 2965 _total_remset_bytes(0), _total_strong_code_roots_bytes(0)
2963 2966 {
2964 2967 if (!log_is_enabled(Trace, gc, liveness)) {
2965 2968 return;
2966 2969 }
2967 2970
2968 2971 G1CollectedHeap* g1h = G1CollectedHeap::heap();
2969 2972 MemRegion g1_reserved = g1h->g1_reserved();
2970 2973 double now = os::elapsedTime();
2971 2974
2972 2975 // Print the header of the output.
2973 2976 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
2974 2977 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP"
2975 2978 G1PPRL_SUM_ADDR_FORMAT("reserved")
2976 2979 G1PPRL_SUM_BYTE_FORMAT("region-size"),
2977 2980 p2i(g1_reserved.start()), p2i(g1_reserved.end()),
2978 2981 HeapRegion::GrainBytes);
2979 2982 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
2980 2983 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
2981 2984 G1PPRL_TYPE_H_FORMAT
2982 2985 G1PPRL_ADDR_BASE_H_FORMAT
2983 2986 G1PPRL_BYTE_H_FORMAT
2984 2987 G1PPRL_BYTE_H_FORMAT
2985 2988 G1PPRL_BYTE_H_FORMAT
2986 2989 G1PPRL_DOUBLE_H_FORMAT
2987 2990 G1PPRL_BYTE_H_FORMAT
2988 2991 G1PPRL_STATE_H_FORMAT
2989 2992 G1PPRL_BYTE_H_FORMAT,
2990 2993 "type", "address-range",
2991 2994 "used", "prev-live", "next-live", "gc-eff",
2992 2995 "remset", "state", "code-roots");
2993 2996 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
2994 2997 G1PPRL_TYPE_H_FORMAT
2995 2998 G1PPRL_ADDR_BASE_H_FORMAT
2996 2999 G1PPRL_BYTE_H_FORMAT
2997 3000 G1PPRL_BYTE_H_FORMAT
2998 3001 G1PPRL_BYTE_H_FORMAT
2999 3002 G1PPRL_DOUBLE_H_FORMAT
3000 3003 G1PPRL_BYTE_H_FORMAT
3001 3004 G1PPRL_STATE_H_FORMAT
3002 3005 G1PPRL_BYTE_H_FORMAT,
3003 3006 "", "",
3004 3007 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
3005 3008 "(bytes)", "", "(bytes)");
3006 3009 }
3007 3010
3008 3011 bool G1PrintRegionLivenessInfoClosure::do_heap_region(HeapRegion* r) {
3009 3012 if (!log_is_enabled(Trace, gc, liveness)) {
3010 3013 return false;
3011 3014 }
3012 3015
3013 3016 const char* type = r->get_type_str();
3014 3017 HeapWord* bottom = r->bottom();
3015 3018 HeapWord* end = r->end();
3016 3019 size_t capacity_bytes = r->capacity();
3017 3020 size_t used_bytes = r->used();
3018 3021 size_t prev_live_bytes = r->live_bytes();
3019 3022 size_t next_live_bytes = r->next_live_bytes();
3020 3023 double gc_eff = r->gc_efficiency();
3021 3024 size_t remset_bytes = r->rem_set()->mem_size();
3022 3025 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
3023 3026 const char* remset_type = r->rem_set()->get_short_state_str();
3024 3027
3025 3028 _total_used_bytes += used_bytes;
3026 3029 _total_capacity_bytes += capacity_bytes;
3027 3030 _total_prev_live_bytes += prev_live_bytes;
3028 3031 _total_next_live_bytes += next_live_bytes;
3029 3032 _total_remset_bytes += remset_bytes;
3030 3033 _total_strong_code_roots_bytes += strong_code_roots_bytes;
3031 3034
3032 3035 // Print a line for this particular region.
3033 3036 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3034 3037 G1PPRL_TYPE_FORMAT
3035 3038 G1PPRL_ADDR_BASE_FORMAT
3036 3039 G1PPRL_BYTE_FORMAT
3037 3040 G1PPRL_BYTE_FORMAT
3038 3041 G1PPRL_BYTE_FORMAT
3039 3042 G1PPRL_DOUBLE_FORMAT
3040 3043 G1PPRL_BYTE_FORMAT
3041 3044 G1PPRL_STATE_FORMAT
3042 3045 G1PPRL_BYTE_FORMAT,
3043 3046 type, p2i(bottom), p2i(end),
3044 3047 used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
3045 3048 remset_bytes, remset_type, strong_code_roots_bytes);
3046 3049
3047 3050 return false;
3048 3051 }
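
Returning false keeps the iteration going over every region, so the totals accumulate heap-wide before the destructor prints the summary. How a caller would typically drive it (a hedged sketch; the phase name is arbitrary):

    //   G1PrintRegionLivenessInfoClosure cl("Post-Marking");
    //   G1CollectedHeap::heap()->heap_region_iterate(&cl);
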
3049 3052
3050 3053 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
3051 3054 if (!log_is_enabled(Trace, gc, liveness)) {
3052 3055 return;
3053 3056 }
3054 3057
3055 3058 // Add static memory usage to the remembered set sizes.
3056 3059 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
3057 3060 // Print the footer of the output.
3058 3061 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
3059 3062 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3060 3063 " SUMMARY"
3061 3064 G1PPRL_SUM_MB_FORMAT("capacity")
3062 3065 G1PPRL_SUM_MB_PERC_FORMAT("used")
3063 3066 G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
3064 3067 G1PPRL_SUM_MB_PERC_FORMAT("next-live")
3065 3068 G1PPRL_SUM_MB_FORMAT("remset")
3066 3069 G1PPRL_SUM_MB_FORMAT("code-roots"),
3067 3070 bytes_to_mb(_total_capacity_bytes),
3068 3071 bytes_to_mb(_total_used_bytes),
3069 3072 percent_of(_total_used_bytes, _total_capacity_bytes),
3070 3073 bytes_to_mb(_total_prev_live_bytes),
3071 3074 percent_of(_total_prev_live_bytes, _total_capacity_bytes),
3072 3075 bytes_to_mb(_total_next_live_bytes),
3073 3076 percent_of(_total_next_live_bytes, _total_capacity_bytes),
3074 3077 bytes_to_mb(_total_remset_bytes),
3075 3078 bytes_to_mb(_total_strong_code_roots_bytes));
3076 3079 }
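
The footer converts the accumulated byte counts into MB and percent-of-capacity figures. A minimal sketch of the helper semantics as used here (assumed definitions; the real ones live in shared HotSpot utility headers):

    #include <cstddef>
    #include <cstdio>

    // Assumed semantics: MB = bytes / 2^20; percentage of a total.
    static double bytes_to_mb(size_t bytes) {
      return (double)bytes / (1024.0 * 1024.0);
    }

    static double percent_of(size_t part, size_t total) {
      return total == 0 ? 0.0 : 100.0 * (double)part / (double)total;
    }

    int main() {
      size_t used     = 512u * 1024 * 1024;          // 512 MB
      size_t capacity = (size_t)2048 * 1024 * 1024;  // 2 GB
      std::printf("used: %1.2f MB / %1.2f %%\n",
                  bytes_to_mb(used), percent_of(used, capacity));
      return 0;
    }
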
(1683 lines elided)