28 #include "code/codeCache.hpp"
29 #include "gc/parallel/parallelScavengeHeap.hpp"
30 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
31 #include "gc/parallel/psMarkSweep.hpp"
32 #include "gc/parallel/psMarkSweepDecorator.hpp"
33 #include "gc/parallel/psOldGen.hpp"
34 #include "gc/parallel/psScavenge.hpp"
35 #include "gc/parallel/psYoungGen.hpp"
36 #include "gc/serial/markSweep.hpp"
37 #include "gc/shared/gcCause.hpp"
38 #include "gc/shared/gcHeapSummary.hpp"
39 #include "gc/shared/gcId.hpp"
40 #include "gc/shared/gcLocker.inline.hpp"
41 #include "gc/shared/gcTimer.hpp"
42 #include "gc/shared/gcTrace.hpp"
43 #include "gc/shared/gcTraceTime.hpp"
44 #include "gc/shared/isGCActiveMark.hpp"
45 #include "gc/shared/referencePolicy.hpp"
46 #include "gc/shared/referenceProcessor.hpp"
47 #include "gc/shared/spaceDecorator.hpp"
48 #include "oops/oop.inline.hpp"
49 #include "runtime/biasedLocking.hpp"
50 #include "runtime/fprofiler.hpp"
51 #include "runtime/safepoint.hpp"
52 #include "runtime/vmThread.hpp"
53 #include "services/management.hpp"
54 #include "services/memoryService.hpp"
55 #include "utilities/events.hpp"
56 #include "utilities/stack.inline.hpp"

elapsedTimer        PSMarkSweep::_accumulated_time;
jlong               PSMarkSweep::_time_of_last_gc = 0;
CollectorCounters*  PSMarkSweep::_counters = NULL;

void PSMarkSweep::initialize() {
  MemRegion mr = ParallelScavengeHeap::heap()->reserved_region();
  set_ref_processor(new ReferenceProcessor(mr));     // a vanilla ref proc
  _counters = new CollectorCounters("PSMarkSweep", 1);
}
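
// Note: 'mr' above spans the heap's entire reserved region, so this single
// "vanilla" ReferenceProcessor discovers references across both generations
// during a full collection.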

// ... (PSMarkSweep::invoke() and the start of
// PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) elided)
  _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());

  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // CollectorPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(_gc_tracer);

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyBeforeGC:");
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
  }

  heap->pre_full_gc_dump(_gc_timer);

  // Filled in below to track the state of the young gen after the collection.
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;

    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);

    if (TraceOldGenTime) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    CodeCache::gc_prologue();
    BiasedLocking::preserve_marks();

    // Capture heap size before collection for printing.
    size_t prev_used = heap->used();

    // Capture metadata size before collection for sizing.
    size_t metadata_prev_used = MetaspaceAux::used_bytes();

    // For PrintGCDetails
    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::clear();
#endif

    ref_processor()->enable_discovery();
    ref_processor()->setup_policy(clear_all_softrefs);
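    // setup_policy() selects the soft reference clearing policy for this
    // cycle: clear_all_softrefs == true forces every soft reference to be
    // cleared, otherwise the default LRU-based policy applies.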

    mark_sweep_phase1(clear_all_softrefs);

    mark_sweep_phase2();

    // ... (phases 3 and 4 and the young gen occupancy bookkeeping that fills
    // in eden_empty, survivors_empty and young_gen_empty elided)
    MetaspaceAux::verify_metrics();

    BiasedLocking::restore_marks();
    CodeCache::gc_epilogue();
    JvmtiExport::gc_epilogue();

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif

    ref_processor()->enqueue_discovered_references(NULL);

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {

      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print("AdaptiveSizeStart: ");
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" collection: %d ", heap->total_collections());
        if (Verbose) {
          gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
                              " young_gen_capacity: " SIZE_FORMAT,
                              old_gen->capacity_in_bytes(),
                              young_gen->capacity_in_bytes());
        }
      }

      // Don't check if the size_policy is ready here.  Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          AdaptiveSizePolicy::should_update_promo_stats(gc_cause)) {
        // Swap the survivor spaces if from_space is empty. The
        // resize_young_gen() called below is normally used after
        // a successful young GC and swapping of survivor spaces;
        // otherwise, it will fail to resize the young gen with
        // the current implementation.
        if (young_gen->from_space()->is_empty()) {
          young_gen->from_space()->clear(SpaceDecorator::Mangle);
          young_gen->swap_spaces();
        }

        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
               young_gen->from_space()->capacity_in_bytes() +
               young_gen->to_space()->capacity_in_bytes(),
               "Sizes of space in young gen are out-of-bounds");

        size_t young_live = young_gen->used_in_bytes();
        size_t eden_live = young_gen->eden_space()->used_in_bytes();
        size_t old_live = old_gen->used_in_bytes();
        size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
        size_t max_old_gen_size = old_gen->max_gen_size();
        size_t max_eden_size = young_gen->max_size() -
          young_gen->from_space()->capacity_in_bytes() -
          young_gen->to_space()->capacity_in_bytes();

        // Used for diagnostics
        size_policy->clear_generation_free_space_flags();

        size_policy->compute_generations_free_space(young_live,
                                                    eden_live,
                                                    old_live,
                                                    cur_eden,
                                                    max_old_gen_size,
                                                    max_eden_size,
                                                    true /* full gc*/);

        size_policy->check_gc_overhead_limit(young_live,
                                             eden_live,
                                             max_old_gen_size,
                                             max_eden_size,
                                             true /* full gc*/,
                                             gc_cause,
                                             heap->collector_policy());

        size_policy->decay_supplemental_growth(true /* full gc*/);

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());
      }
      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                               heap->total_collections());
      }
    }

    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // We collected the heap, recalculate the metaspace capacity
    MetaspaceGC::compute_new_size();

    if (TraceOldGenTime) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC so
        // would be confusing.
        young_gen->print_used_change(young_gen_prev_used);
        old_gen->print_used_change(old_gen_prev_used);
      }
      heap->print_heap_change(prev_used);
      if (PrintGCDetails) {
        MetaspaceAux::print_metaspace_change(metadata_prev_used);
      }
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyAfterGC:");
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(_gc_tracer);

  heap->post_full_gc_dump(_gc_timer);

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  _gc_timer->register_gc_end();

  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());

  return true;
}

bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                             PSYoungGen* young_gen,
                                             PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }

  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }
  if (old_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }

  // Figure out how much to take from eden.  Include the average amount promoted
  // in the total; otherwise the next young gen GC will simply bail out to a
  // full GC.
  const size_t alignment = old_gen->virtual_space()->alignment();
  const size_t eden_used = eden_space->used_in_bytes();
  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
  const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
  const size_t eden_capacity = eden_space->capacity_in_bytes();

  if (absorb_size >= eden_capacity) {
    return false;   // Must leave some space in eden.
  }

  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
  if (new_young_size < young_gen->min_gen_size()) {
    return false;   // Respect young gen minimum size.
  }
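
  // Worked example with illustrative numbers (not taken from a real run):
  // eden_used = 60M, padded average promoted = 8M, alignment = 64K gives
  // absorb_size = align_size_up(68M, 64K) = 68M.  With eden_capacity = 96M
  // the first check passes, and with a 256M young gen the proposed
  // new_young_size is 188M, which must still be at least min_gen_size().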

  if (TraceAdaptiveGCBoundary && Verbose) {
    gclog_or_tty->print(" absorbing " SIZE_FORMAT "K:  "
                        "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                        "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                        "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
                        absorb_size / K,
                        eden_capacity / K, (eden_capacity - absorb_size) / K,
                        young_gen->from_space()->used_in_bytes() / K,
                        young_gen->to_space()->used_in_bytes() / K,
                        young_gen->capacity_in_bytes() / K, new_young_size / K);
  }

  // Fill the unused part of the old gen.
  MutableSpace* const old_space = old_gen->object_space();
  HeapWord* const unused_start = old_space->top();
  size_t const unused_words = pointer_delta(old_space->end(), unused_start);

  if (unused_words > 0) {
    if (unused_words < CollectedHeap::min_fill_size()) {
      return false;  // If the old gen cannot be filled, must give up.
    }
    CollectedHeap::fill_with_objects(unused_start, unused_words);
  }
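
  // The filler objects written above keep the old gen parseable: once the
  // generation boundary moves, every committed word up to the new top must
  // look like a walkable object to heap iterators and the block start array.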

  // Take the live data from eden and set both top and end in the old gen to
  // eden top.  (Need to set end because reset_after_change() mangles the region
  // from end to virtual_space->high() in debug builds).
  HeapWord* const new_top = eden_space->top();
  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
                                        absorb_size);
  young_gen->reset_after_change();

  // ... (rest of absorb_live_data_from_eden() and the start of
  // PSMarkSweep::allocate_stacks() elided)
  MutableSpace* to_space = young_gen->to_space();
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  // We want to calculate the size in bytes first.
  _preserved_count_max = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);
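
  // Sizing example (illustrative numbers): with 1M of free space in to_space
  // and a 16-byte PreservedMark, _preserved_count_max ends up as 65536 slots.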
}


void PSMarkSweep::deallocate_stacks() {
  _preserved_mark_stack.clear(true);
  _preserved_oop_stack.clear(true);
  _marking_stack.clear();
  _objarray_stack.clear(true);
}

void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer);

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  {
    ParallelScavengeHeap::ParStrongRootsScope psrs;
    Universe::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
    CLDToOopClosure mark_and_push_from_cld(mark_and_push_closure());
    MarkingCodeBlobClosure each_active_code_blob(mark_and_push_closure(), !CodeBlobToOopClosure::FixRelocations);
    Threads::oops_do(mark_and_push_closure(), &mark_and_push_from_cld, &each_active_code_blob);
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    FlatProfiler::oops_do(mark_and_push_closure());
    Management::oops_do(mark_and_push_closure());
    JvmtiExport::oops_do(mark_and_push_closure());
    SystemDictionary::always_strong_oops_do(mark_and_push_closure());
    ClassLoaderDataGraph::always_strong_cld_do(follow_cld_closure());
    // ... (remaining strong roots elided)
  }

  // ... (marking stack flush and reference processing elided)

  // Unload classes and purge the SystemDictionary.
  bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

  // Unload nmethods.
  CodeCache::do_unloading(is_alive_closure(), purged_class);

  // Prune dead klasses from subklass/sibling/implementor lists.
  Klass::clean_weak_klass_links(is_alive_closure());

  // Delete entries for dead interned strings.
  StringTable::unlink(is_alive_closure());

  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();

  _gc_tracer->report_object_count_after_gc(is_alive_closure());
}

void PSMarkSweep::mark_sweep_phase2() {
  GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer);

  // Now all live objects are marked, compute the new object addresses.

  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so.  See comment under phase4.

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSOldGen* old_gen = heap->old_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also compact the young gen spaces.
  old_gen->precompact();
}
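
// Note: "compacting into the old gen" above means forwarding addresses are
// handed out starting at the bottom of the old gen; per the comments in
// phase 2, old_gen->precompact() computes destinations for old gen objects
// and then chains on to the young gen spaces, which is why a single call
// covers the whole heap.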

// This should be moved to the shared markSweep code!
class PSAlwaysTrueClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) { return true; }
};
static PSAlwaysTrueClosure always_true;

void PSMarkSweep::mark_sweep_phase3() {
  // Adjust the pointers to reflect the new locations
  GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer);

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  Universe::oops_do(adjust_pointer_closure());
  JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
  CLDToOopClosure adjust_from_cld(adjust_pointer_closure());
  Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL);
  ObjectSynchronizer::oops_do(adjust_pointer_closure());
  FlatProfiler::oops_do(adjust_pointer_closure());
  Management::oops_do(adjust_pointer_closure());
  JvmtiExport::oops_do(adjust_pointer_closure());
  SystemDictionary::oops_do(adjust_pointer_closure());
  ClassLoaderDataGraph::cld_do(adjust_cld_closure());

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());

  CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
  CodeCache::blobs_do(&adjust_from_blobs);
  StringTable::oops_do(adjust_pointer_closure());
  ref_processor()->weak_oops_do(adjust_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());

  adjust_marks();

  young_gen->adjust_pointers();
  old_gen->adjust_pointers();
}

void PSMarkSweep::mark_sweep_phase4() {
  EventMark m("4 compact heap");
  GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer);

  // All pointers are now adjusted, move objects accordingly

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  old_gen->compact();
  young_gen->compact();
}

jlong PSMarkSweep::millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong ret_val = now - _time_of_last_gc;
  // XXX See note in genCollectedHeap::millis_since_last_gc().
  if (ret_val < 0) {
    NOT_PRODUCT(warning("time warp: " JLONG_FORMAT, ret_val);)
    return 0;
  }
  return ret_val;
}