src/share/vm/gc/g1/g1CollectedHeap.cpp

Old version:

3218   // Record whether this pause is an initial mark. When the current
3219   // thread has completed its logging output and it's safe to signal
3220   // the CM thread, the flag's value in the policy has been reset.
3221   bool should_start_conc_mark = collector_state()->during_initial_mark_pause();
3222 
3223   // Inner scope for scope based logging, timers, and stats collection
3224   {
3225     EvacuationInfo evacuation_info;
3226 
3227     if (collector_state()->during_initial_mark_pause()) {
3228       // We are about to start a marking cycle, so we increment the
3229       // full collection counter.
3230       increment_old_marking_cycles_started();
3231       register_concurrent_cycle_start(_gc_timer_stw->gc_start());
3232     }
3233 
3234     _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
3235 
3236     GCTraceCPUTime tcpu;
3237 
3238     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
3239                                                                   workers()->active_workers(),
3240                                                                   Threads::number_of_non_daemon_threads());
3241     workers()->set_active_workers(active_workers);
3242     FormatBuffer<> gc_string("Pause ");
3243     if (collector_state()->during_initial_mark_pause()) {
3244       gc_string.append("Initial Mark");
3245     } else if (collector_state()->gcs_are_young()) {
3246       gc_string.append("Young");
3247     } else {
3248       gc_string.append("Mixed");
3249     }
3250     GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
3251 
3252     g1_policy()->note_gc_start(active_workers);
3253 
3254     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3255     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3256 
3257     // If the secondary_free_list is not empty, append it to the
3258     // free_list. No need to wait for the cleanup operation to finish;
3259     // the region allocation code will check the secondary_free_list
3260     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3261     // set, skip this step so that the region allocation code has to
3262     // get entries from the secondary_free_list.
3263     if (!G1StressConcRegionFreeing) {
3264       append_secondary_free_list_if_not_empty_with_lock();
3265     }
3266 
3267     G1HeapTransition heap_transition(this);
3268     size_t heap_used_bytes_before_gc = used();
3269 
3270   assert(check_young_list_well_formed(), "young list should be well formed");

New version:

3218   // Record whether this pause is an initial mark. When the current
3219   // thread has completed its logging output and it's safe to signal
3220   // the CM thread, the flag's value in the policy has been reset.
3221   bool should_start_conc_mark = collector_state()->during_initial_mark_pause();
3222 
3223   // Inner scope for scope based logging, timers, and stats collection
3224   {
3225     EvacuationInfo evacuation_info;
3226 
3227     if (collector_state()->during_initial_mark_pause()) {
3228       // We are about to start a marking cycle, so we increment the
3229       // full collection counter.
3230       increment_old_marking_cycles_started();
3231       register_concurrent_cycle_start(_gc_timer_stw->gc_start());
3232     }
3233 
3234     _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
3235 
3236     GCTraceCPUTime tcpu;
3237
3238     FormatBuffer<> gc_string("Pause ");
3239     if (collector_state()->during_initial_mark_pause()) {
3240       gc_string.append("Initial Mark");
3241     } else if (collector_state()->gcs_are_young()) {
3242       gc_string.append("Young");
3243     } else {
3244       gc_string.append("Mixed");
3245     }
3246     GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
3247 
3248     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
3249                                                                   workers()->active_workers(),
3250                                                                   Threads::number_of_non_daemon_threads());
3251     workers()->set_active_workers(active_workers);
3252 
3253     g1_policy()->note_gc_start(active_workers);
3254 
3255     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3256     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3257 
3258     // If the secondary_free_list is not empty, append it to the
3259     // free_list. No need to wait for the cleanup operation to finish;
3260     // the region allocation code will check the secondary_free_list
3261     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3262     // set, skip this step so that the region allocation code has to
3263     // get entries from the secondary_free_list.
3264     if (!G1StressConcRegionFreeing) {
3265       append_secondary_free_list_if_not_empty_with_lock();
3266     }
3267 
3268     G1HeapTransition heap_transition(this);
3269     size_t heap_used_bytes_before_gc = used();
3270 
3271     assert(check_young_list_well_formed(), "young list should be well formed");
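
The net effect of the change shown above is a reordering within the pause setup: in the new version the "Pause ..." label is built and the GCTraceTime scope is entered (lines 3238-3246) before calc_active_workers() and set_active_workers() run (lines 3248-3251), whereas the old version set up the worker count first (3238-3241) and only then started the trace scope (3242-3250). The rest of the hunk is unchanged apart from an extra blank line, which shifts the trailing lines down by one (3270 becomes 3271).

Assuming only that GCTraceTime is a scope-based timer that measures from its construction to the end of the enclosing block (the "Inner scope for scope based logging, timers, and stats collection" comment suggests as much), the reordering places the worker-count calculation inside the timed, logged region rather than outside it. The sketch below illustrates that pattern; ScopedTraceTime and the dummy calc_active_workers() are hypothetical stand-ins written for this note, not HotSpot code.

#include <chrono>
#include <cstdio>
#include <string>
#include <utility>

// Stand-in for an RAII scoped timer in the spirit of GCTraceTime:
// timing starts at construction and the result is printed when the
// enclosing scope ends. Illustrative only, not the real API.
class ScopedTraceTime {
 public:
  explicit ScopedTraceTime(std::string title)
      : _title(std::move(title)), _start(std::chrono::steady_clock::now()) {}

  ~ScopedTraceTime() {
    auto end = std::chrono::steady_clock::now();
    double ms = std::chrono::duration<double, std::milli>(end - _start).count();
    std::printf("%s %.3fms\n", _title.c_str(), ms);
  }

 private:
  std::string _title;
  std::chrono::steady_clock::time_point _start;
};

// Dummy worker-count calculation; pretend it does a non-trivial amount of work.
static unsigned calc_active_workers() {
  volatile unsigned n = 0;
  for (unsigned i = 0; i < 10000000; i++) { n = n + 1; }
  return 8;
}

int main() {
  {
    // Old ordering: the worker calculation runs before the timer exists,
    // so its cost is not part of the reported time.
    unsigned workers = calc_active_workers();
    ScopedTraceTime tm("Pause Young (old ordering)");
    (void)workers;
    // ... the rest of the pause would run here ...
  }
  {
    // New ordering: the timer is constructed first, so the worker
    // calculation falls inside the measured interval.
    ScopedTraceTime tm("Pause Young (new ordering)");
    unsigned workers = calc_active_workers();
    (void)workers;
    // ... the rest of the pause would run here ...
  }
  return 0;
}

Built as a plain C++11 program, the second scope's reported time includes the stand-in worker calculation while the first scope's does not, which mirrors what the reordering changes about the interval the GC trace covers.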

