
src/share/vm/gc/g1/g1CollectedHeap.cpp

rev 11460 : refactor: switch to update_active_workers()


1316       clear_rsets_post_compaction();
1317       check_gc_time_stamps();
1318 
1319       resize_if_necessary_after_full_collection();
1320 
1321       // We should do this after we potentially resize the heap so
1322       // that all the COMMIT / UNCOMMIT events are generated before
1323       // the compaction events.
1324       print_hrm_post_compaction();
1325 
1326       if (_hot_card_cache->use_cache()) {
1327         _hot_card_cache->reset_card_counts();
1328         _hot_card_cache->reset_hot_cache();
1329       }
1330 
1331       // Rebuild remembered sets of all regions.
1332       uint n_workers =
1333         AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
1334                                                 workers()->active_workers(),
1335                                                 Threads::number_of_non_daemon_threads());
1336 -     workers()->set_active_workers(n_workers);
1336 +     workers()->update_active_workers(n_workers);
1337 
1338       ParRebuildRSTask rebuild_rs_task(this);
1339       workers()->run_task(&rebuild_rs_task);
1340 
1341       // Rebuild the strong code root lists for each region
1342       rebuild_strong_code_roots();
1343 
1344       if (true) { // FIXME
1345         MetaspaceGC::compute_new_size();
1346       }
1347 
1348 #ifdef TRACESPINNING
1349       ParallelTaskTerminator::print_termination_counts();
1350 #endif
1351 
1352       // Discard all rset updates
1353       JavaThread::dirty_card_queue_set().abandon_logs();
1354       assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
1355 
1356       // At this point there should be no regions in the
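For readers skimming the hunk above: calc_active_workers() picks how many gang workers to run for the remembered-set rebuild, based on the gang's capacity, the current active count, and the number of non-daemon Java threads. A minimal sketch of that kind of clamping policy follows; the function name, the scaling heuristic, and the bounds are illustrative assumptions, not the actual AdaptiveSizePolicy implementation:

    // Hypothetical sketch only: choose a worker count that never exceeds
    // the gang's capacity and never drops below one, using the mutator
    // thread count as a rough proxy for how much parallelism to request.
    // Assumes HotSpot's MAX2/MIN2 from utilities/globalDefinitions.hpp.
    static uint sketch_calc_active_workers(uint total_workers,
                                           uint active_workers,
                                           uint non_daemon_threads) {
      uint desired = MAX2(active_workers, non_daemon_threads); // assumed heuristic
      return MAX2(1u, MIN2(desired, total_workers));           // clamp to [1, total]
    }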


3149       _cm->gc_tracer_cm()->set_gc_cause(gc_cause());
3150     }
3151 
3152     _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
3153 
3154     GCTraceCPUTime tcpu;
3155 
3156     FormatBuffer<> gc_string("Pause ");
3157     if (collector_state()->during_initial_mark_pause()) {
3158       gc_string.append("Initial Mark");
3159     } else if (collector_state()->gcs_are_young()) {
3160       gc_string.append("Young");
3161     } else {
3162       gc_string.append("Mixed");
3163     }
3164     GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
3165 
3166     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
3167                                                                   workers()->active_workers(),
3168                                                                   Threads::number_of_non_daemon_threads());
3169 -     workers()->set_active_workers(active_workers);
3169 +     workers()->update_active_workers(active_workers);
3170 
3171     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3172     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3173 
3174     // If the secondary_free_list is not empty, append it to the
3175     // free_list. No need to wait for the cleanup operation to finish;
3176     // the region allocation code will check the secondary_free_list
3177     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3178     // set, skip this step so that the region allocation code has to
3179     // get entries from the secondary_free_list.
3180     if (!G1StressConcRegionFreeing) {
3181       append_secondary_free_list_if_not_empty_with_lock();
3182     }
3183 
3184     G1HeapTransition heap_transition(this);
3185     size_t heap_used_bytes_before_gc = used();
3186 
3187     // Don't dynamically change the number of GC threads this early.  A value of
3188     // 0 is used to indicate serial work.  When parallel work is done,
3189     // it will be set.
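The rename applied in this hunk (line 3169) and in the earlier one (line 1336), from set_active_workers() to update_active_workers(), is the entire change in this file. A sketch of the set-and-validate pattern such an updating setter typically follows is below; the struct, the clamping, and the assertion text are assumptions made for illustration, not the real WorkGang code:

    // Hypothetical sketch only: clamp the requested count into the gang's
    // valid range, record it, and return the value actually applied so
    // callers can observe any adjustment.
    struct WorkGangSketch {
      uint _total_workers;
      uint _active_workers;
      uint update_active_workers(uint v) {
        assert(v <= _total_workers, "requested more workers than the gang has");
        _active_workers = MAX2(1u, v);  // never run with zero workers
        return _active_workers;
      }
    };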



