src/share/vm/gc_implementation/parNew/parNewGeneration.cpp

Old:

 899   if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) {
 900     set_avoid_promotion_undo(true);
 901   }
 902 
 903   // If the next generation is too full to accommodate worst-case promotion
 904   // from this generation, pass on collection; let the next generation
 905   // do it.
 906   if (!collection_attempt_is_safe()) {
 907     gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
 908     return;
 909   }
 910   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
 911 
 912   init_assuming_no_promotion_failure();
 913 
 914   if (UseAdaptiveSizePolicy) {
 915     set_survivor_overflow(false);
 916     size_policy->minor_collection_begin();
 917   }
 918 
 919   TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
 920   // Capture heap used before collection (for printing).
 921   size_t gch_prev_used = gch->used();
 922 
 923   SpecializationStats::clear();
 924 
 925   age_table()->clear();
 926   to()->clear(SpaceDecorator::Mangle);
 927 
 928   gch->save_marks();
 929   assert(workers != NULL, "Need parallel worker threads.");
 930   int n_workers = active_workers;
 931 
 932   // Set the correct parallelism (number of queues) in the reference processor
 933   ref_processor()->set_active_mt_degree(n_workers);
 934 
 935   // Always set the terminator for the active number of workers
 936   // because only those workers go through the termination protocol.
 937   ParallelTaskTerminator _term(n_workers, task_queues());
 938   ParScanThreadStateSet thread_state_set(workers->active_workers(),
 939                                          *to(), *this, *_next_gen, *task_queues(),

New:

 899   if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) {
 900     set_avoid_promotion_undo(true);
 901   }
 902 
 903   // If the next generation is too full to accommodate worst-case promotion
 904   // from this generation, pass on collection; let the next generation
 905   // do it.
 906   if (!collection_attempt_is_safe()) {
 907     gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
 908     return;
 909   }
 910   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
 911 
 912   init_assuming_no_promotion_failure();
 913 
 914   if (UseAdaptiveSizePolicy) {
 915     set_survivor_overflow(false);
 916     size_policy->minor_collection_begin();
 917   }
 918 
 919   TraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, gclog_or_tty);
 920   // Capture heap used before collection (for printing).
 921   size_t gch_prev_used = gch->used();
 922 
 923   SpecializationStats::clear();
 924 
 925   age_table()->clear();
 926   to()->clear(SpaceDecorator::Mangle);
 927 
 928   gch->save_marks();
 929   assert(workers != NULL, "Need parallel worker threads.");
 930   int n_workers = active_workers;
 931 
 932   // Set the correct parallelism (number of queues) in the reference processor
 933   ref_processor()->set_active_mt_degree(n_workers);
 934 
 935   // Always set the terminator for the active number of workers
 936   // because only those workers go through the termination protocol.
 937   ParallelTaskTerminator _term(n_workers, task_queues());
 938   ParScanThreadStateSet thread_state_set(workers->active_workers(),
 939                                          *to(), *this, *_next_gen, *task_queues(),
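
The only change in this hunk is on line 919: the label passed to TraceTime
changes from the string literal "GC" to GCCauseString("GC", gch->gc_cause()),
so the timer's PrintGC output also names the cause of the collection. As a
rough illustration of the idea (a standalone sketch, not the HotSpot class;
the real GCCauseString lives in gc_interface/gcCause.hpp and its exact
formatting may differ), the wrapper behaves roughly like this:

  // Standalone sketch of the GCCauseString idea -- NOT the HotSpot code.
  // Assumes the wrapper formats the label as "prefix (cause)".
  #include <stdio.h>

  class GCCauseStringSketch {
   private:
    char _buffer[128];
   public:
    // Builds e.g. "GC (Allocation Failure)" from a prefix and a cause name.
    GCCauseStringSketch(const char* prefix, const char* cause) {
      snprintf(_buffer, sizeof(_buffer), "%s (%s)", prefix, cause);
    }
    operator const char*() const { return _buffer; }
  };

  int main() {
    // Before the change the trace label was the bare literal "GC"; after
    // it, the log line also shows what triggered the collection.
    GCCauseStringSketch label("GC", "Allocation Failure");
    printf("[%s ...]\n", (const char*)label);  // prints: [GC (Allocation Failure) ...]
    return 0;
  }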

