< prev index next >

src/share/vm/gc/cms/parNewGeneration.cpp

Print this page




 879 
 880   // If the next generation is too full to accommodate worst-case promotion
 881   // from this generation, pass on collection; let the next generation
 882   // do it.
 883   if (!collection_attempt_is_safe()) {
 884     gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
 885     return;
 886   }
 887   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
 888 
 889   _gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
 890   gch->trace_heap_before_gc(gc_tracer());
 891 
 892   init_assuming_no_promotion_failure();
 893 
 894   if (UseAdaptiveSizePolicy) {
 895     set_survivor_overflow(false);
 896     size_policy->minor_collection_begin();
 897   }
 898 
 899   GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, _gc_tracer.gc_id());
 900   // Capture heap used before collection (for printing).
 901   size_t gch_prev_used = gch->used();
 902 
 903   age_table()->clear();
 904   to()->clear(SpaceDecorator::Mangle);
 905 
 906   gch->save_marks();
 907 
 908   // Set the correct parallelism (number of queues) in the reference processor
 909   ref_processor()->set_active_mt_degree(active_workers);
 910 
 911   // Always set the terminator for the active number of workers
 912   // because only those workers go through the termination protocol.
 913   ParallelTaskTerminator _term(active_workers, task_queues());
 914   ParScanThreadStateSet thread_state_set(active_workers,
 915                                          *to(), *this, *_old_gen, *task_queues(),
 916                                          _overflow_stacks, desired_plab_sz(), _term);
 917 
 918   thread_state_set.reset(active_workers, promotion_failed());
 919 


 942   }
 943 
 944   // Process (weak) reference objects found during scavenge.
 945   ReferenceProcessor* rp = ref_processor();
 946   IsAliveClosure is_alive(this);
 947   ScanWeakRefClosure scan_weak_ref(this);
 948   KeepAliveClosure keep_alive(&scan_weak_ref);
 949   ScanClosure               scan_without_gc_barrier(this, false);
 950   ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
 951   set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
 952   EvacuateFollowersClosureGeneral evacuate_followers(gch,
 953     &scan_without_gc_barrier, &scan_with_gc_barrier);
 954   rp->setup_policy(clear_all_soft_refs);
 955   // Can the mt_degree be set later (at run_task() time would be best)?
 956   rp->set_active_mt_degree(active_workers);
 957   ReferenceProcessorStats stats;
 958   if (rp->processing_is_mt()) {
 959     ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
 960     stats = rp->process_discovered_references(&is_alive, &keep_alive,
 961                                               &evacuate_followers, &task_executor,
 962                                               _gc_timer, _gc_tracer.gc_id());
 963   } else {
 964     thread_state_set.flush();
 965     gch->save_marks();
 966     stats = rp->process_discovered_references(&is_alive, &keep_alive,
 967                                               &evacuate_followers, NULL,
 968                                               _gc_timer, _gc_tracer.gc_id());
 969   }
 970   _gc_tracer.report_gc_reference_stats(stats);
 971   if (!promotion_failed()) {
 972     // Swap the survivor spaces.
 973     eden()->clear(SpaceDecorator::Mangle);
 974     from()->clear(SpaceDecorator::Mangle);
 975     if (ZapUnusedHeapArea) {
 976       // This is now done here because of the piece-meal mangling which
 977       // can check for valid mangling at intermediate points in the
 978       // collection(s).  When a young collection fails to collect
 979       // sufficient space resizing of the young generation can occur
 980       // and redistribute the spaces in the young generation.  Mangle
 981       // here so that unzapped regions don't get distributed to
 982       // other spaces.
 983       to()->mangle_unused_area();
 984     }
 985     swap_spaces();
 986 
 987     // A successful scavenge should restart the GC time limit count which is
 988     // for full GC's.




 879 
 880   // If the next generation is too full to accommodate worst-case promotion
 881   // from this generation, pass on collection; let the next generation
 882   // do it.
 883   if (!collection_attempt_is_safe()) {
 884     gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
 885     return;
 886   }
 887   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
 888 
 889   _gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
 890   gch->trace_heap_before_gc(gc_tracer());
 891 
 892   init_assuming_no_promotion_failure();
 893 
 894   if (UseAdaptiveSizePolicy) {
 895     set_survivor_overflow(false);
 896     size_policy->minor_collection_begin();
 897   }
 898 
 899   GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
 900   // Capture heap used before collection (for printing).
 901   size_t gch_prev_used = gch->used();
 902 
 903   age_table()->clear();
 904   to()->clear(SpaceDecorator::Mangle);
 905 
 906   gch->save_marks();
 907 
 908   // Set the correct parallelism (number of queues) in the reference processor
 909   ref_processor()->set_active_mt_degree(active_workers);
 910 
 911   // Always set the terminator for the active number of workers
 912   // because only those workers go through the termination protocol.
 913   ParallelTaskTerminator _term(active_workers, task_queues());
 914   ParScanThreadStateSet thread_state_set(active_workers,
 915                                          *to(), *this, *_old_gen, *task_queues(),
 916                                          _overflow_stacks, desired_plab_sz(), _term);
 917 
 918   thread_state_set.reset(active_workers, promotion_failed());
 919 


 942   }
 943 
 944   // Process (weak) reference objects found during scavenge.
 945   ReferenceProcessor* rp = ref_processor();
 946   IsAliveClosure is_alive(this);
 947   ScanWeakRefClosure scan_weak_ref(this);
 948   KeepAliveClosure keep_alive(&scan_weak_ref);
 949   ScanClosure               scan_without_gc_barrier(this, false);
 950   ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
 951   set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
 952   EvacuateFollowersClosureGeneral evacuate_followers(gch,
 953     &scan_without_gc_barrier, &scan_with_gc_barrier);
 954   rp->setup_policy(clear_all_soft_refs);
 955   // Can the mt_degree be set later (at run_task() time would be best)?
 956   rp->set_active_mt_degree(active_workers);
 957   ReferenceProcessorStats stats;
 958   if (rp->processing_is_mt()) {
 959     ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
 960     stats = rp->process_discovered_references(&is_alive, &keep_alive,
 961                                               &evacuate_followers, &task_executor,
 962                                               _gc_timer);
 963   } else {
 964     thread_state_set.flush();
 965     gch->save_marks();
 966     stats = rp->process_discovered_references(&is_alive, &keep_alive,
 967                                               &evacuate_followers, NULL,
 968                                               _gc_timer);
 969   }
 970   _gc_tracer.report_gc_reference_stats(stats);
 971   if (!promotion_failed()) {
 972     // Swap the survivor spaces.
 973     eden()->clear(SpaceDecorator::Mangle);
 974     from()->clear(SpaceDecorator::Mangle);
 975     if (ZapUnusedHeapArea) {
 976       // This is now done here because of the piece-meal mangling which
 977       // can check for valid mangling at intermediate points in the
 978       // collection(s).  When a young collection fails to collect
 979       // sufficient space resizing of the young generation can occur
 980       // and redistribute the spaces in the young generation.  Mangle
 981       // here so that unzapped regions don't get distributed to
 982       // other spaces.
 983       to()->mangle_unused_area();
 984     }
 985     swap_spaces();
 986 
 987     // A successful scavenge should restart the GC time limit count which is
 988     // for full GC's.


< prev index next >