
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

3734   // Record whether this pause is an initial mark. By the time it is
3735   // safe to signal the CM thread (after the current thread finishes its
3736   // logging output), the flag's value in the policy will have been reset.
3737   bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
3738 
3739   // Inner scope for scope-based logging, timers, and stats collection
3740   {
3741     EvacuationInfo evacuation_info;
3742 
3743     if (g1_policy()->during_initial_mark_pause()) {
3744       // We are about to start a marking cycle, so we increment the
3745       // counter of old marking cycles started.
3746       increment_old_marking_cycles_started();
3747       register_concurrent_cycle_start(_gc_timer_stw->gc_start());
3748     }
3749 
3750     _gc_tracer_stw->report_yc_type(yc_type());
3751 
3752     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
3753 
3754     uint active_workers = workers()->active_workers();
3755     double pause_start_sec = os::elapsedTime();
3756     g1_policy()->phase_times()->note_gc_start(active_workers, mark_in_progress());
3757     log_gc_header();
3758 
3759     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3760     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3761 
3762     // If the secondary_free_list is not empty, append it to the
3763     // free_list. No need to wait for the cleanup operation to finish;
3764     // the region allocation code will check the secondary_free_list
3765     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3766     // set, skip this step so that the region allocation code has to
3767     // get entries from the secondary_free_list.
3768     if (!G1StressConcRegionFreeing) {
3769       append_secondary_free_list_if_not_empty_with_lock();
3770     }
3771 
3772     assert(check_young_list_well_formed(), "young list should be well formed");
3773 
3774     // Don't dynamically change the number of GC threads this early.  A value of
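The append_secondary_free_list_if_not_empty_with_lock() call above (skipped
under G1StressConcRegionFreeing) is a one-shot splice of the regions queued by
concurrent cleanup. A minimal sketch of how such a helper could look, assuming
a _secondary_free_list member and a SecondaryFreeList_lock (neither is shown
in this excerpt):

void G1CollectedHeap::append_secondary_free_list_if_not_empty_with_lock() {
  // Checking emptiness before taking the lock is a benign race: a list
  // that becomes non-empty just after the check is picked up either by a
  // later pause or by the allocation path, which also inspects it.
  if (!_secondary_free_list.is_empty()) {
    MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
    append_secondary_free_list();  // splice everything onto the free_list
  }
}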


5047   {
5048     uint n_workers = _g1h->workers()->active_workers();
5049     G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
5050     set_par_threads(n_workers);
5051     workers()->run_task(&g1_unlink_task);
5052     set_par_threads(0);
5053   }
5054 
5055   if (G1StringDedup::is_enabled()) {
5056     G1StringDedup::unlink(is_alive);
5057   }
5058 }
5059 
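Both the unlink dispatch above and redirty_logged_cards() below bracket the
gang task with the same set_par_threads() pattern. A sketch of that pattern as
a free-standing helper (the helper itself is hypothetical; the calls are the
ones used in this file):

static void run_gang_task(G1CollectedHeap* g1h, AbstractGangTask* task) {
  uint n_workers = g1h->workers()->active_workers();
  g1h->set_par_threads(n_workers);  // phase timing/stats key off this count
  g1h->workers()->run_task(task);   // returns once every worker's work() is done
  g1h->set_par_threads(0);          // reset for the following serial phases
}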
5060 class G1RedirtyLoggedCardsTask : public AbstractGangTask {
5061  private:
5062   DirtyCardQueueSet* _queue;
5063  public:
5064   G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue) : AbstractGangTask("Redirty Cards"), _queue(queue) { }
5065 
5066   virtual void work(uint worker_id) {
5067     G1GCPhaseTimes* phase_times = G1CollectedHeap::heap()->g1_policy()->phase_times();
5068     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::RedirtyCards, worker_id);
5069 
5070     RedirtyLoggedCardTableEntryClosure cl;
5071     _queue->par_apply_closure_to_all_completed_buffers(&cl);
5072 
5073     phase_times->record_sub_count(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_processed());
5074   }
5075 };
5076 
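G1GCParPhaseTimesTracker is used in work() above as a scoped tracker:
constructed on entry, it records this worker's phase time when it goes out of
scope, so every exit path is timed. A simplified sketch of that RAII idiom
(member and method names here are assumptions, not the actual API):

class ScopedPhaseTimer {
  G1GCPhaseTimes* _phase_times;
  G1GCPhaseTimes::GCParPhases _phase;
  uint _worker_id;
  double _start_sec;
 public:
  ScopedPhaseTimer(G1GCPhaseTimes* pt, G1GCPhaseTimes::GCParPhases phase, uint worker_id)
    : _phase_times(pt), _phase(phase), _worker_id(worker_id),
      _start_sec(os::elapsedTime()) { }
  ~ScopedPhaseTimer() {
    // Record on destruction: covers early returns as well as the normal exit.
    _phase_times->record_time_secs(_phase, _worker_id, os::elapsedTime() - _start_sec);
  }
};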
5077 void G1CollectedHeap::redirty_logged_cards() {
5078   double redirty_logged_cards_start = os::elapsedTime();
5079 
5080   uint n_workers = _g1h->workers()->active_workers();
5081 
5082   G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set());
5083   dirty_card_queue_set().reset_for_par_iteration();
5084   set_par_threads(n_workers);
5085   workers()->run_task(&redirty_task);
5086   set_par_threads(0);
5087 
5088   DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
5089   dcq.merge_bufferlists(&dirty_card_queue_set());
5090   assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
5091 
5092   g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
5093 }
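RedirtyLoggedCardTableEntryClosure is only named in this hunk. A plausible
sketch of its shape, assuming the CardTableEntryClosure interface and the card
table's dirty_card_val() (both outside this excerpt):

class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
  size_t _num_processed;
 public:
  RedirtyLoggedCardTableEntryClosure() : _num_processed(0) { }

  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
    *card_ptr = CardTableModRefBS::dirty_card_val();  // re-dirty the card
    _num_processed++;
    return true;  // keep iterating over the completed buffers
  }

  size_t num_processed() const { return _num_processed; }
};

The per-worker count recorded via record_sub_count() in work() above would be
exactly this closure's num_processed() value.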

