*/

#include "precompiled.hpp"
#include "classfile/stringTable.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/cardTableExtension.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweep.hpp"
#include "gc/parallel/psParallelCompact.hpp"
#include "gc/parallel/psScavenge.inline.hpp"
#include "gc/parallel/psTasks.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memoryService.hpp"
#include "utilities/stack.inline.hpp"

HeapWord* PSScavenge::_to_space_top_before_gc = NULL;
int PSScavenge::_consecutive_skipped_scavenges = 0;
ReferenceProcessor* PSScavenge::_ref_processor = NULL;
CardTableExtension* PSScavenge::_card_table = NULL;
bool PSScavenge::_survivor_overflow = false;
uint PSScavenge::_tenuring_threshold = 0;
HeapWord* PSScavenge::_young_generation_boundary = NULL;
uintptr_t PSScavenge::_young_generation_boundary_compressed = 0;
elapsedTimer PSScavenge::_accumulated_time;
STWGCTimer PSScavenge::_gc_timer;

// ...

bool PSScavenge::invoke_no_policy() {
  // ...

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  GCIdMark gc_id_mark;
  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  heap->increment_total_collections();

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if (AdaptiveSizePolicy::should_update_eden_stats(gc_cause)) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  size_t prev_used = heap->used();

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyBeforeGC:");
  }

  {
    ResourceMark rm;
    HandleMark hm;

    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(false /* not full GC */, gc_cause);

    if (TraceYoungGenTime) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

    // Verify the object start arrays.
    if (VerifyObjectStartArray &&
        VerifyBeforeGC) {
      old_gen->verify_object_start_array();
    }

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    assert(young_gen->to_space()->is_empty(),
           "Attempt to scavenge with live objects in to_space");
    young_gen->to_space()->clear(SpaceDecorator::Mangle);

    save_to_space_top_before_gc();

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::clear();
#endif

    reference_processor()->enable_discovery();
    reference_processor()->setup_policy(false);
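    // (The two calls above arm reference discovery for this scavenge and
    // select the default soft-reference clearing policy; "false" means
    // not always_clear, matching the setup_policy(false) call repeated
    // before reference processing below.)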

    // We track how much was promoted to the next generation for
    // the AdaptiveSizePolicy.
    size_t old_gen_used_before = old_gen->used_in_bytes();

    // For PrintGCDetails
    size_t young_gen_used_before = young_gen->used_in_bytes();

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    // We need to save the old top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    // Set the number of GC threads to be used in this collection
    gc_task_manager()->set_active_gang();
    gc_task_manager()->task_idle_workers();
    // Get the active number of workers here and use that value
    // throughout the methods.
    uint active_workers = gc_task_manager()->active_workers();

    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
    {
      GCTraceTime tm("Scavenge", false, false, &_gc_timer);
      ParallelScavengeHeap::ParStrongRootsScope psrs;

      GCTaskQueue* q = GCTaskQueue::create();

      if (!old_gen->object_space()->is_empty()) {
        // There are only old-to-young pointers if there are objects
        // in the old gen.
        uint stripe_total = active_workers;
        for (uint i = 0; i < stripe_total; i++) {
          q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i, stripe_total));
        }
      }

      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
      // We scan the thread roots in parallel
      Threads::create_thread_roots_tasks(q);
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::class_loader_data));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));

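      // Once a worker drains its own depth-first queue it uses a StealTask
      // to take entries from the other promotion managers' queues; the
      // terminator lets the workers agree that every queue is empty so the
      // parallel phase can end.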
      ParallelTaskTerminator terminator(
        active_workers,
        (TaskQueueSetSuper*) promotion_manager->stack_array_depth());
      if (active_workers > 1) {
        for (uint j = 0; j < active_workers; j++) {
          q->enqueue(new StealTask(&terminator));
        }
      }

      gc_task_manager()->execute_and_wait(q);
    }

    scavenge_midpoint.update();

    // Process reference objects discovered during scavenge
    {
      GCTraceTime tm("References", false, false, &_gc_timer);

      reference_processor()->setup_policy(false);  // not always_clear
      reference_processor()->set_active_mt_degree(active_workers);
      PSKeepAliveClosure keep_alive(promotion_manager);
      PSEvacuateFollowersClosure evac_followers(promotion_manager);
      ReferenceProcessorStats stats;
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, &task_executor,
          &_gc_timer);
      } else {
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer);
      }

      _gc_tracer.report_gc_reference_stats(stats);

      // Enqueue reference objects discovered during scavenge.
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        reference_processor()->enqueue_discovered_references(&task_executor);
      } else {
        reference_processor()->enqueue_discovered_references(NULL);
      }
    }

    {
      GCTraceTime tm("StringTable", false, false, &_gc_timer);
      // Unlink any dead interned Strings and process the remaining live ones.
      PSScavengeRootsClosure root_closure(promotion_manager);
      StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure);
    }

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer);
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      if (PrintGC) {
        gclog_or_tty->print("--");
      }
    }

    _gc_tracer.report_tenuring_threshold(tenuring_threshold());

    // Let the size policy know we're done. Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);

    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
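      // All surviving young objects were copied into to_space, so eden and
      // from_space now hold only dead objects; both can be cleared and the
      // two survivor spaces exchanged to set up the next scavenge.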
      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
      size_policy->update_averages(_survivor_overflow, survived, promoted);

      // A successful scavenge should restart the GC time limit count which is
      // for full GC's.
      size_policy->reset_gc_overhead_limit_count();
      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print("AdaptiveSizeStart: ");
          gclog_or_tty->stamp();
          gclog_or_tty->print_cr(" collection: %d ",
                                 heap->total_collections());

          if (Verbose) {
            gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
                                " young_gen_capacity: " SIZE_FORMAT,
                                old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
          }
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t max_young_size = young_gen->max_size();

        // Deciding a free ratio in the young generation is tricky, so if
        // MinHeapFreeRatio or MaxHeapFreeRatio are in use (implying that
        // the old generation size may have been limited because of them) we
        // should then limit our young generation size using NewRatio to have it
        // follow the old generation size.
        if (MinHeapFreeRatio != 0 || MaxHeapFreeRatio != 100) {
          max_young_size = MIN2(old_gen->capacity_in_bytes() / NewRatio, young_gen->max_size());
        }

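        // The survivor limit caps how much of the (possibly clamped) young
        // gen may be devoted to a survivor space; the policy combines it
        // with the overflow flag to size the survivors and pick the next
        // tenuring threshold.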
        size_t survivor_limit =
          size_policy->max_survivor_size(max_young_size);
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(
            _survivor_overflow,
            _tenuring_threshold,
            survivor_limit);

        if (PrintTenuringDistribution) {
          gclog_or_tty->cr();
          gclog_or_tty->print_cr("Desired survivor size " SIZE_FORMAT " bytes, new threshold %u"
                                 " (max threshold " UINTX_FORMAT ")",
                                 size_policy->calculated_survivor_size_in_bytes(),
                                 _tenuring_threshold, MaxTenuringThreshold);
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

        // Do call at minor collections?
        // Don't check if the size_policy is ready at this
        // level. Let the size_policy check that internally.
        if (UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            (AdaptiveSizePolicy::should_update_eden_stats(gc_cause))) {
          // Calculate optimal free space amounts
          assert(young_gen->max_size() >
                 young_gen->from_space()->capacity_in_bytes() +
                 young_gen->to_space()->capacity_in_bytes(),
                 "Sizes of space in young gen are out-of-bounds");

          size_t young_live = young_gen->used_in_bytes();
          size_t eden_live = young_gen->eden_space()->used_in_bytes();
          // ... (computation of cur_eden, max_old_gen_size and max_eden_size,
          // and the compute_eden_space_size() call, elided) ...
          size_policy->check_gc_overhead_limit(young_live,
                                               eden_live,
                                               max_old_gen_size,
                                               max_eden_size,
                                               false /* not full gc */,
                                               gc_cause,
                                               heap->collector_policy());

          size_policy->decay_supplemental_growth(false /* not full gc */);
        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated. This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at young collections can cause increases
        // that don't feed back to the generation sizing policy until
        // a full collection. Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                                 heap->total_collections());
        }
      }

      // Update the structure of the eden. With NUMA-eden, CPU hotplugging or
      // offlining can change the heap layout. Make sure eden is reshaped if
      // that's the case. Also, update() will cause adaptive NUMA chunk resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());

    {
      GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer);

      CodeCache::prune_scavenge_root_nmethods();
    }

    // Re-verify object start arrays
    if (VerifyObjectStartArray &&
        VerifyAfterGC) {
      old_gen->verify_object_start_array();
    }

    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // CardTableExtension::verify_all_young_refs_precise();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (TraceYoungGenTime) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here. This is after the GC so
        // it would be confusing.
        young_gen->print_used_change(young_gen_used_before);
      }
      heap->print_heap_change(prev_used);
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    gc_task_manager()->release_idle_workers();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyAfterGC:");
  }

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(&_gc_tracer);

  scavenge_exit.update();

  if (PrintGCTaskTimeStamps) {
    tty->print_cr("VM-Thread " JLONG_FORMAT " " JLONG_FORMAT " " JLONG_FORMAT,
                  scavenge_entry.ticks(), scavenge_midpoint.ticks(),
                  scavenge_exit.ticks());
    gc_task_manager()->print_task_time_stamps();
  }

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  _gc_timer.register_gc_end();

  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());

  return !promotion_failure_occurred;
}

// This method iterates over all objects in the young generation,
// unforwarding markOops. It then restores any preserved mark oops,
// and clears the _preserved_mark_stack.
void PSScavenge::clean_up_failed_promotion() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();

  {
    ResourceMark rm;

    // Unforward all pointers in the young gen.
    PSPromotionFailedClosure unforward_closure;
    young_gen->object_iterate(&unforward_closure);

    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Restoring " SIZE_FORMAT " marks", _preserved_oop_stack.size());
    }

    // Restore any saved marks.
    while (!_preserved_oop_stack.is_empty()) {
      oop obj = _preserved_oop_stack.pop();
      markOop mark = _preserved_mark_stack.pop();
      obj->set_mark(mark);
    }

    // Clear the preserved mark and oop stack caches.
    _preserved_mark_stack.clear(true);
    _preserved_oop_stack.clear(true);
  }

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(heap->reset_promotion_should_fail();)
}

// This method is called whenever an attempt to promote an object
// fails. Some markOops will need preservation, some will not. Note
// that the entire eden is traversed after a failed promotion, with
// all forwarded headers replaced by the default markOop. This means
// it is not necessary to preserve most markOops.

// ...

bool PSScavenge::should_attempt_scavenge() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
  // ...
  PSOldGen* old_gen = heap->old_gen();

  // Do not attempt to scavenge unless to_space is empty
  if (!young_gen->to_space()->is_empty()) {
    _consecutive_skipped_scavenges++;
    if (UsePerfData) {
      counters->update_scavenge_skipped(to_space_not_empty);
    }
    return false;
  }

  // Test to see if the scavenge will likely fail.
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // A similar test is done in the policy's should_full_GC(). If this is
  // changed, decide if that test should also be changed.
  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
  bool result = promotion_estimate < old_gen->free_in_bytes();
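  // Heuristic: only scavenge when the old gen's free space can absorb the
  // padded average promotion volume; otherwise the scavenge is skipped
  // rather than risking a promotion failure partway through.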

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(result ? " do scavenge: " : " skip scavenge: ");
    gclog_or_tty->print_cr(" average_promoted " SIZE_FORMAT
                           " padded_average_promoted " SIZE_FORMAT
                           " free in old gen " SIZE_FORMAT,
                           (size_t) policy->average_promoted_in_bytes(),
                           (size_t) policy->padded_average_promoted_in_bytes(),
                           old_gen->free_in_bytes());
    if (young_gen->used_in_bytes() <
        (size_t) policy->padded_average_promoted_in_bytes()) {
      gclog_or_tty->print_cr(" padded_promoted_average is greater"
                             " than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes());
    }
  }

  if (result) {
    _consecutive_skipped_scavenges = 0;
  } else {
    _consecutive_skipped_scavenges++;
    if (UsePerfData) {
      counters->update_scavenge_skipped(promoted_too_large);
    }
  }
  return result;
}

// Used to add tasks
GCTaskManager* const PSScavenge::gc_task_manager() {
  assert(ParallelScavengeHeap::gc_task_manager() != NULL,
         "shouldn't return NULL");
  return ParallelScavengeHeap::gc_task_manager();
}
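
// Typical usage of this accessor, following the pattern in
// invoke_no_policy() above:
//   GCTaskQueue* q = GCTaskQueue::create();
//   q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
//   gc_task_manager()->execute_and_wait(q);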

// ============================================================================
// Updated version of the same file, after conversion to unified logging.
// ============================================================================

*/

#include "precompiled.hpp"
#include "classfile/stringTable.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/cardTableExtension.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweep.hpp"
#include "gc/parallel/psParallelCompact.hpp"
#include "gc/parallel/psScavenge.inline.hpp"
#include "gc/parallel/psTasks.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memoryService.hpp"
#include "utilities/stack.inline.hpp"

HeapWord* PSScavenge::_to_space_top_before_gc = NULL;
int PSScavenge::_consecutive_skipped_scavenges = 0;
ReferenceProcessor* PSScavenge::_ref_processor = NULL;
CardTableExtension* PSScavenge::_card_table = NULL;
bool PSScavenge::_survivor_overflow = false;
uint PSScavenge::_tenuring_threshold = 0;
HeapWord* PSScavenge::_young_generation_boundary = NULL;
uintptr_t PSScavenge::_young_generation_boundary_compressed = 0;
elapsedTimer PSScavenge::_accumulated_time;
STWGCTimer PSScavenge::_gc_timer;

// ...

bool PSScavenge::invoke_no_policy() {
  // ...

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  GCIdMark gc_id_mark;
  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  heap->increment_total_collections();

  if (AdaptiveSizePolicy::should_update_eden_stats(gc_cause)) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify("Before GC");
  }

  {
    ResourceMark rm;
    HandleMark hm;

    GCTraceCPUTime tcpu;
    GCTraceTime(Info, gc) tm("Pause Young", NULL, gc_cause, true);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(false /* not full GC */, gc_cause);

    if (TraceYoungGenTime) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

    // Verify the object start arrays.
    if (VerifyObjectStartArray &&
        VerifyBeforeGC) {
      old_gen->verify_object_start_array();
    }

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    assert(young_gen->to_space()->is_empty(),
           "Attempt to scavenge with live objects in to_space");
    young_gen->to_space()->clear(SpaceDecorator::Mangle);

    save_to_space_top_before_gc();

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::clear();
#endif

    reference_processor()->enable_discovery();
    reference_processor()->setup_policy(false);

    PreGCValues pre_gc_values(heap);
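    // Snapshot the pre-GC occupancy of the generations (and metaspace) so
    // the used-change log lines and the promotion accounting below can
    // compare against it.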

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    // We need to save the old top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    // Set the number of GC threads to be used in this collection
    gc_task_manager()->set_active_gang();
    gc_task_manager()->task_idle_workers();
    // Get the active number of workers here and use that value
    // throughout the methods.
    uint active_workers = gc_task_manager()->active_workers();

    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
    {
      GCTraceTime(Debug, gc, phases) tm("Scavenge", &_gc_timer);
      ParallelScavengeHeap::ParStrongRootsScope psrs;

      GCTaskQueue* q = GCTaskQueue::create();

      if (!old_gen->object_space()->is_empty()) {
        // There are only old-to-young pointers if there are objects
        // in the old gen.
        uint stripe_total = active_workers;
        for (uint i = 0; i < stripe_total; i++) {
          q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i, stripe_total));
        }
      }

      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
      // We scan the thread roots in parallel
      Threads::create_thread_roots_tasks(q);
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::class_loader_data));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));

      ParallelTaskTerminator terminator(
        active_workers,
        (TaskQueueSetSuper*) promotion_manager->stack_array_depth());
      if (active_workers > 1) {
        for (uint j = 0; j < active_workers; j++) {
          q->enqueue(new StealTask(&terminator));
        }
      }

      gc_task_manager()->execute_and_wait(q);
    }

    scavenge_midpoint.update();

    // Process reference objects discovered during scavenge
    {
      GCTraceTime(Debug, gc, phases) tm("References", &_gc_timer);

      reference_processor()->setup_policy(false);  // not always_clear
      reference_processor()->set_active_mt_degree(active_workers);
      PSKeepAliveClosure keep_alive(promotion_manager);
      PSEvacuateFollowersClosure evac_followers(promotion_manager);
      ReferenceProcessorStats stats;
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, &task_executor,
          &_gc_timer);
      } else {
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer);
      }

      _gc_tracer.report_gc_reference_stats(stats);

      // Enqueue reference objects discovered during scavenge.
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        reference_processor()->enqueue_discovered_references(&task_executor);
      } else {
        reference_processor()->enqueue_discovered_references(NULL);
      }
    }

    {
      GCTraceTime(Debug, gc, phases) tm("StringTable", &_gc_timer);
      // Unlink any dead interned Strings and process the remaining live ones.
      PSScavengeRootsClosure root_closure(promotion_manager);
      StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure);
    }

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer);
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      log_info(gc)("Promotion failed");
    }

    _gc_tracer.report_tenuring_threshold(tenuring_threshold());

    // Let the size policy know we're done. Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);

    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - pre_gc_values.old_gen_used();
      size_policy->update_averages(_survivor_overflow, survived, promoted);

      // A successful scavenge should restart the GC time limit count which is
      // for full GC's.
      size_policy->reset_gc_overhead_limit_count();
      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d", heap->total_collections());
        log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,
                            old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t max_young_size = young_gen->max_size();

        // Deciding a free ratio in the young generation is tricky, so if
        // MinHeapFreeRatio or MaxHeapFreeRatio are in use (implying that
        // the old generation size may have been limited because of them) we
        // should then limit our young generation size using NewRatio to have it
        // follow the old generation size.
        if (MinHeapFreeRatio != 0 || MaxHeapFreeRatio != 100) {
          max_young_size = MIN2(old_gen->capacity_in_bytes() / NewRatio, young_gen->max_size());
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(max_young_size);
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(
            _survivor_overflow,
            _tenuring_threshold,
            survivor_limit);

        log_debug(gc, age)("Desired survivor size " SIZE_FORMAT " bytes, new threshold %u (max threshold " UINTX_FORMAT ")",
                           size_policy->calculated_survivor_size_in_bytes(),
                           _tenuring_threshold, MaxTenuringThreshold);

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

        // Do call at minor collections?
        // Don't check if the size_policy is ready at this
        // level. Let the size_policy check that internally.
        if (UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            (AdaptiveSizePolicy::should_update_eden_stats(gc_cause))) {
          // Calculate optimal free space amounts
          assert(young_gen->max_size() >
                 young_gen->from_space()->capacity_in_bytes() +
                 young_gen->to_space()->capacity_in_bytes(),
                 "Sizes of space in young gen are out-of-bounds");

          size_t young_live = young_gen->used_in_bytes();
          size_t eden_live = young_gen->eden_space()->used_in_bytes();
          // ... (computation of cur_eden, max_old_gen_size and max_eden_size,
          // and the compute_eden_space_size() call, elided) ...
          size_policy->check_gc_overhead_limit(young_live,
                                               eden_live,
                                               max_old_gen_size,
                                               max_eden_size,
                                               false /* not full gc */,
                                               gc_cause,
                                               heap->collector_policy());

          size_policy->decay_supplemental_growth(false /* not full gc */);
        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated. This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at young collections can cause increases
        // that don't feed back to the generation sizing policy until
        // a full collection. Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());

        log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d", heap->total_collections());
      }

      // Update the structure of the eden. With NUMA-eden, CPU hotplugging or
      // offlining can change the heap layout. Make sure eden is reshaped if
      // that's the case. Also, update() will cause adaptive NUMA chunk resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());

    {
      GCTraceTime(Debug, gc, phases) tm("Prune Scavenge Root Methods", &_gc_timer);

      CodeCache::prune_scavenge_root_nmethods();
    }

    // Re-verify object start arrays
    if (VerifyObjectStartArray &&
        VerifyAfterGC) {
      old_gen->verify_object_start_array();
    }

    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // CardTableExtension::verify_all_young_refs_precise();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (TraceYoungGenTime) accumulated_time()->stop();

    young_gen->print_used_change(pre_gc_values.young_gen_used());
    old_gen->print_used_change(pre_gc_values.old_gen_used());
    MetaspaceAux::print_metaspace_change(pre_gc_values.metadata_used());

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    gc_task_manager()->release_idle_workers();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify("After GC");
  }

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(&_gc_tracer);

  scavenge_exit.update();

  log_debug(gc, task, time)("VM-Thread " JLONG_FORMAT " " JLONG_FORMAT " " JLONG_FORMAT,
                            scavenge_entry.ticks(), scavenge_midpoint.ticks(),
                            scavenge_exit.ticks());
  gc_task_manager()->print_task_time_stamps();

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());

  _gc_timer.register_gc_end();

  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());

  return !promotion_failure_occurred;
}

// This method iterates over all objects in the young generation,
// unforwarding markOops. It then restores any preserved mark oops,
// and clears the _preserved_mark_stack.
void PSScavenge::clean_up_failed_promotion() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();

  {
    ResourceMark rm;

    // Unforward all pointers in the young gen.
    PSPromotionFailedClosure unforward_closure;
    young_gen->object_iterate(&unforward_closure);

    log_trace(gc, ergo)("Restoring " SIZE_FORMAT " marks", _preserved_oop_stack.size());

    // Restore any saved marks.
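    // During the scavenge, forwarding pointers overwrote the headers of
    // promotion-failed objects; marks that could not be recreated were
    // pushed onto the preserved stacks and are reinstalled here, oop by oop.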
    while (!_preserved_oop_stack.is_empty()) {
      oop obj = _preserved_oop_stack.pop();
      markOop mark = _preserved_mark_stack.pop();
      obj->set_mark(mark);
    }

    // Clear the preserved mark and oop stack caches.
    _preserved_mark_stack.clear(true);
    _preserved_oop_stack.clear(true);
  }

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(heap->reset_promotion_should_fail();)
}

// This method is called whenever an attempt to promote an object
// fails. Some markOops will need preservation, some will not. Note
// that the entire eden is traversed after a failed promotion, with
// all forwarded headers replaced by the default markOop. This means
// it is not necessary to preserve most markOops.

// ...

bool PSScavenge::should_attempt_scavenge() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
  // ...
  PSOldGen* old_gen = heap->old_gen();

  // Do not attempt to scavenge unless to_space is empty
  if (!young_gen->to_space()->is_empty()) {
    _consecutive_skipped_scavenges++;
    if (UsePerfData) {
      counters->update_scavenge_skipped(to_space_not_empty);
    }
    return false;
  }

  // Test to see if the scavenge will likely fail.
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // A similar test is done in the policy's should_full_GC(). If this is
  // changed, decide if that test should also be changed.
  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
  bool result = promotion_estimate < old_gen->free_in_bytes();

  log_trace(ergo)("%s scavenge: average_promoted " SIZE_FORMAT " padded_average_promoted " SIZE_FORMAT " free in old gen " SIZE_FORMAT,
                  result ? "Do" : "Skip", (size_t) policy->average_promoted_in_bytes(),
                  (size_t) policy->padded_average_promoted_in_bytes(),
                  old_gen->free_in_bytes());
  if (young_gen->used_in_bytes() < (size_t) policy->padded_average_promoted_in_bytes()) {
    log_trace(ergo)(" padded_promoted_average is greater than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes());
  }

  if (result) {
    _consecutive_skipped_scavenges = 0;
  } else {
    _consecutive_skipped_scavenges++;
    if (UsePerfData) {
      counters->update_scavenge_skipped(promoted_too_large);
    }
  }
  return result;
}

// Used to add tasks
GCTaskManager* const PSScavenge::gc_task_manager() {
  assert(ParallelScavengeHeap::gc_task_manager() != NULL,
         "shouldn't return NULL");
  return ParallelScavengeHeap::gc_task_manager();
}