
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

rev 7792 : 8149347: G1: guarantee fails with UseDynamicNumberOfGCThreads
Reviewed-by: poonam, kevinw
rev 7793 : 8149347: G1: guarantee fails with UseDynamicNumberOfGCThreads
Reviewed-by: poonam, kevinw
rev 7794 : 8149347: G1: guarantee fails with UseDynamicNumberOfGCThreads
Reviewed-by: poonam, kevinw


3974   // Record whether this pause is an initial mark. When the current
3975   // thread has completed its logging output and it's safe to signal
3976   // the CM thread, the flag's value in the policy has been reset.
3977   bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
3978 
3979   // Inner scope for scope based logging, timers, and stats collection
3980   {
3981     EvacuationInfo evacuation_info;
3982 
3983     if (g1_policy()->during_initial_mark_pause()) {
3984       // We are about to start a marking cycle, so we increment the
3985       // full collection counter.
3986       increment_old_marking_cycles_started();
3987       register_concurrent_cycle_start(_gc_timer_stw->gc_start());
3988     }
3989 
3990     _gc_tracer_stw->report_yc_type(yc_type());
3991 
3992     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
3993 
3994     uint active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
3995                                 workers()->active_workers() : 1);







3996     double pause_start_sec = os::elapsedTime();
3997     g1_policy()->phase_times()->note_gc_start(active_workers, mark_in_progress());
3998     log_gc_header();
3999 
4000     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
4001     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
4002 
4003     // If the secondary_free_list is not empty, append it to the
4004     // free_list. No need to wait for the cleanup operation to finish;
4005     // the region allocation code will check the secondary_free_list
4006     // and wait if necessary. If the G1StressConcRegionFreeing flag is
4007     // set, skip this step so that the region allocation code has to
4008     // get entries from the secondary_free_list.
4009     if (!G1StressConcRegionFreeing) {
4010       append_secondary_free_list_if_not_empty_with_lock();
4011     }
4012 
4013     assert(check_young_list_well_formed(), "young list should be well formed");
4014     assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
4015            "sanity check");


5744   // and could significantly increase the pause time.
5745 
5746   double ref_enq_time = os::elapsedTime() - ref_enq_start;
5747   g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
5748 }
5749 
5750 void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
5751   _expand_heap_after_alloc_failure = true;
5752   _evacuation_failed = false;
5753 
5754   // Should G1EvacuationFailureALot be in effect for this GC?
5755   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
5756 
5757   g1_rem_set()->prepare_for_oops_into_collection_set_do();
5758 
5759   // Disable the hot card cache.
5760   G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
5761   hot_card_cache->reset_hot_cache_claimed_index();
5762   hot_card_cache->set_use_cache(false);
5763 
5764   uint n_workers;
5765   if (G1CollectedHeap::use_parallel_gc_threads()) {
5766     n_workers =
5767       AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
5768                                      workers()->active_workers(),
5769                                      Threads::number_of_non_daemon_threads());
5770     assert(UseDynamicNumberOfGCThreads ||
5771            n_workers == workers()->total_workers(),
5772            "If not dynamic should be using all the  workers");
5773     workers()->set_active_workers(n_workers);
5774     set_par_threads(n_workers);
5775   } else {
5776     assert(n_par_threads() == 0,
5777            "Should be the original non-parallel value");
5778     n_workers = 1;
5779   }
5780 
5781 
5782   init_for_evac_failure(NULL);
5783 
5784   rem_set()->prepare_for_younger_refs_iterate(true);
5785 
5786   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5787   double start_par_time_sec = os::elapsedTime();
5788   double end_par_time_sec;
5789 
5790   {
5791     G1RootProcessor root_processor(this);
5792     G1ParTask g1_par_task(this, _task_queues, &root_processor);
5793     // InitialMark needs claim bits to keep track of the marked-through CLDs.
5794     if (g1_policy()->during_initial_mark_pause()) {
5795       ClassLoaderDataGraph::clear_claimed_marks();
5796     }
5797 
5798     if (G1CollectedHeap::use_parallel_gc_threads()) {
5799       // The individual threads will set their evac-failure closures.
5800       if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();




3974   // Record whether this pause is an initial mark. When the current
3975   // thread has completed its logging output and it's safe to signal
3976   // the CM thread, the flag's value in the policy has been reset.
3977   bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
3978 
3979   // Inner scope for scope based logging, timers, and stats collection
3980   {
3981     EvacuationInfo evacuation_info;
3982 
3983     if (g1_policy()->during_initial_mark_pause()) {
3984       // We are about to start a marking cycle, so we increment the
3985       // full collection counter.
3986       increment_old_marking_cycles_started();
3987       register_concurrent_cycle_start(_gc_timer_stw->gc_start());
3988     }
3989 
3990     _gc_tracer_stw->report_yc_type(yc_type());
3991 
3992     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
3993 
3994     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
3995                                                                   workers()->active_workers(),
3996                                                                   Threads::number_of_non_daemon_threads());
3997     assert(UseDynamicNumberOfGCThreads ||
3998            active_workers == workers()->total_workers(),
3999            "If not dynamic should be using all the  workers");
4000     workers()->set_active_workers(active_workers);
4001 
4002 
4003     double pause_start_sec = os::elapsedTime();
4004     g1_policy()->phase_times()->note_gc_start(active_workers, mark_in_progress());
4005     log_gc_header();
4006 
4007     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
4008     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
4009 
4010     // If the secondary_free_list is not empty, append it to the
4011     // free_list. No need to wait for the cleanup operation to finish;
4012     // the region allocation code will check the secondary_free_list
4013     // and wait if necessary. If the G1StressConcRegionFreeing flag is
4014     // set, skip this step so that the region allocation code has to
4015     // get entries from the secondary_free_list.
4016     if (!G1StressConcRegionFreeing) {
4017       append_secondary_free_list_if_not_empty_with_lock();
4018     }
4019 
4020     assert(check_young_list_well_formed(), "young list should be well formed");
4021     assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
4022            "sanity check");


5751   // and could significantly increase the pause time.
5752 
5753   double ref_enq_time = os::elapsedTime() - ref_enq_start;
5754   g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
5755 }
5756 
5757 void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
5758   _expand_heap_after_alloc_failure = true;
5759   _evacuation_failed = false;
5760 
5761   // Should G1EvacuationFailureALot be in effect for this GC?
5762   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
5763 
5764   g1_rem_set()->prepare_for_oops_into_collection_set_do();
5765 
5766   // Disable the hot card cache.
5767   G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
5768   hot_card_cache->reset_hot_cache_claimed_index();
5769   hot_card_cache->set_use_cache(false);
5770 
5771   const uint n_workers = workers()->active_workers();





5772   assert(UseDynamicNumberOfGCThreads ||
5773          n_workers == workers()->total_workers(),
5774          "If not dynamic should be using all the  workers");

5775   set_par_threads(n_workers);






5776 
5777   init_for_evac_failure(NULL);
5778 
5779   rem_set()->prepare_for_younger_refs_iterate(true);
5780 
5781   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5782   double start_par_time_sec = os::elapsedTime();
5783   double end_par_time_sec;
5784 
5785   {
5786     G1RootProcessor root_processor(this);
5787     G1ParTask g1_par_task(this, _task_queues, &root_processor);
5788     // InitialMark needs claim bits to keep track of the marked-through CLDs.
5789     if (g1_policy()->during_initial_mark_pause()) {
5790       ClassLoaderDataGraph::clear_claimed_marks();
5791     }
5792 
5793     if (G1CollectedHeap::use_parallel_gc_threads()) {
5794       // The individual threads will set their evac-failure closures.
5795       if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();

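The patch above moves the AdaptiveSizePolicy::calc_active_workers() call from evacuate_collection_set() up to the start of the pause, so the active worker count is computed once and then only read (and asserted against) later. To make the shape of that calculation easier to follow, here is a small standalone sketch of a dynamic worker-count heuristic. It is not HotSpot's AdaptiveSizePolicy::calc_active_workers; the names (calc_active_workers_sketch, use_dynamic_number_of_gc_threads) and the "two workers per mutator thread" scaling are assumptions made purely for illustration. Only the clamping behavior matters: with dynamic sizing off, the result equals total_workers, which is exactly what the assert in the patch relies on.

    // Illustrative sketch only; not HotSpot's implementation.
    #include <algorithm>
    #include <cstdio>

    // Hypothetical stand-in for the -XX:+UseDynamicNumberOfGCThreads switch.
    static bool use_dynamic_number_of_gc_threads = true;

    // Pick an active worker count no larger than the configured total,
    // scaled by the current number of application (non-daemon) threads.
    static unsigned calc_active_workers_sketch(unsigned total_workers,
                                               unsigned prev_active_workers,
                                               unsigned non_daemon_threads) {
      if (!use_dynamic_number_of_gc_threads) {
        // Dynamic sizing off: all configured workers participate,
        // matching the "should be using all the workers" assert.
        return total_workers;
      }
      // Assumed heuristic: roughly two workers per mutator thread,
      // never below one and never above the configured total.
      unsigned wanted = std::max(1u, 2 * non_daemon_threads);
      unsigned active = std::min(total_workers, wanted);
      (void)prev_active_workers; // a real policy could smooth against this
      return active;
    }

    int main() {
      // Example: 8 configured workers, 8 previously active, 3 mutator threads.
      unsigned n = calc_active_workers_sketch(8, 8, 3);
      std::printf("active workers: %u\n", n); // prints 6 with this heuristic
      return 0;
    }

Computed once per pause and cached (as the new code does via workers()->set_active_workers()), every later consumer such as set_par_threads() and the phase-times bookkeeping sees the same value, which is the consistency the failing guarantee in 8149347 was checking.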
