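// Record the type of this young collection (normal, initial-mark or
// mixed) with the stop-the-world GC tracer.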
_gc_tracer_stw->report_yc_type(collector_state()->yc_type());
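// Scoped helper that logs user, system and real CPU time spent in
// this pause when gc+cpu logging is enabled.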
GCTraceCPUTime tcpu;
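// Compose the name of the pause for the log message, e.g.
// "Pause Young" or "Pause Mixed".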
FormatBuffer<> gc_string("Pause ");
if (collector_state()->during_initial_mark_pause()) {
  gc_string.append("Initial Mark");
} else if (collector_state()->gcs_are_young()) {
  gc_string.append("Young");
} else {
  gc_string.append("Mixed");
}
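// Time the pause and log it together with its cause; the trailing
// 'true' also logs heap usage before and after the pause.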
GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
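// Pick the number of parallel GC worker threads for this pause; the
// count only adapts when UseDynamicNumberOfGCThreads is enabled.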
uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
                                                              workers()->active_workers(),
                                                              Threads::number_of_non_daemon_threads());
workers()->set_active_workers(active_workers);
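// Tell the policy that a pause is starting so it can begin
// recording phase times.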
g1_policy()->note_gc_start(active_workers);
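// Scoped updates of the incremental (young) collection perf counters
// and of the memory manager statistics exposed via java.lang.management.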
TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
// If the secondary_free_list is not empty, append it to the
// free_list. No need to wait for the cleanup operation to finish;
// the region allocation code will check the secondary_free_list
// and wait if necessary. If the G1StressConcRegionFreeing flag is
// set, skip this step so that the region allocation code has to
// get entries from the secondary_free_list.
if (!G1StressConcRegionFreeing) {
  append_secondary_free_list_if_not_empty_with_lock();
}
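// Capture the heap state before the pause (per-region-type usage and
// total occupancy) so the transition can be logged afterwards.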
G1HeapTransition heap_transition(this);
size_t heap_used_bytes_before_gc = used();
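// Sanity check (debug builds only): the young region list must be consistent.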
assert(check_young_list_well_formed(), "young list should be well formed");
// Don't dynamically change the number of GC threads this early. A value of