src/share/vm/gc/g1/g1CollectedHeap.cpp

--- old/src/share/vm/gc/g1/g1CollectedHeap.cpp

5298 
5299     assert(rp->num_q() == n_workers, "sanity");
5300     assert(n_workers <= rp->max_num_q(), "sanity");
5301 
5302     G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, n_workers);
5303     rp->enqueue_discovered_references(&par_task_executor);
5304   }
5305 
5306   rp->verify_no_references_recorded();
5307   assert(!rp->discovery_enabled(), "should have been disabled");
5308 
5309   // FIXME
5310   // CM's reference processing also cleans up the string and symbol tables.
5311   // Should we do that here also? We could, but it is a serial operation
5312   // and could significantly increase the pause time.
5313 
5314   double ref_enq_time = os::elapsedTime() - ref_enq_start;
5315   g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
5316 }
5317 
5318 void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
5319   _expand_heap_after_alloc_failure = true;
5320   _evacuation_failed = false;
5321 
5322   // Should G1EvacuationFailureALot be in effect for this GC?
5323   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
5324 
5325   g1_rem_set()->prepare_for_oops_into_collection_set_do();
5326 
5327   // Disable the hot card cache.
5328   G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
5329   hot_card_cache->reset_hot_cache_claimed_index();
5330   hot_card_cache->set_use_cache(false);
5331 
5332   const uint n_workers = workers()->active_workers();
5333 
5334   init_for_evac_failure(NULL);
5335 
5336   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5337   double start_par_time_sec = os::elapsedTime();
5338   double end_par_time_sec;
5339 
5340   {
5341     G1RootProcessor root_processor(this, n_workers);
5342     G1ParTask g1_par_task(this, _task_queues, &root_processor, n_workers);
5343     // InitialMark needs claim bits to keep track of the marked-through CLDs.
5344     if (g1_policy()->during_initial_mark_pause()) {
5345       ClassLoaderDataGraph::clear_claimed_marks();
5346     }
5347 
5348     // The individual threads will set their evac-failure closures.
5349     if (PrintTerminationStats) G1ParScanThreadState::print_termination_stats_hdr();
5350 
5351     workers()->run_task(&g1_par_task);
5352     end_par_time_sec = os::elapsedTime();
5353 
5354     // Closing the inner scope will execute the destructor
5355     // for the G1RootProcessor object. We record the current
5356     // elapsed time before closing the scope so that time
5357     // taken for the destructor is NOT included in the
5358     // reported parallel time.

+++ new/src/share/vm/gc/g1/g1CollectedHeap.cpp

5298 
5299     assert(rp->num_q() == n_workers, "sanity");
5300     assert(n_workers <= rp->max_num_q(), "sanity");
5301 
5302     G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, n_workers);
5303     rp->enqueue_discovered_references(&par_task_executor);
5304   }
5305 
5306   rp->verify_no_references_recorded();
5307   assert(!rp->discovery_enabled(), "should have been disabled");
5308 
5309   // FIXME
5310   // CM's reference processing also cleans up the string and symbol tables.
5311   // Should we do that here also? We could, but it is a serial operation
5312   // and could significantly increase the pause time.
5313 
5314   double ref_enq_time = os::elapsedTime() - ref_enq_start;
5315   g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
5316 }
5317 
5318 class TimedTask : public AbstractGangTask {
5319  public:
5320   TimedTask() : AbstractGangTask("TimedTask") {}
5321   void work(uint worker_id) {
5322     // Do nothing.
5323   }
5324 };
5325 
5326 #include "utilities/ticks.inline.hpp"
5327 
5328 void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
5329   _expand_heap_after_alloc_failure = true;
5330   _evacuation_failed = false;
5331 
5332   // Should G1EvacuationFailureALot be in effect for this GC?
5333   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
5334 
5335   g1_rem_set()->prepare_for_oops_into_collection_set_do();
5336 
5337   // Disable the hot card cache.
5338   G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
5339   hot_card_cache->reset_hot_cache_claimed_index();
5340   hot_card_cache->set_use_cache(false);
5341 
5342   const uint n_workers = workers()->active_workers();
5343 
5344   init_for_evac_failure(NULL);
5345 
5346   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5347   double start_par_time_sec = os::elapsedTime();
5348   double end_par_time_sec;
5349 
5350   for (int i = 0; i < NewCodeParameter; i++) {
5351     TimedTask task;
5352     jlong start = os::elapsed_counter();
5353     workers()->run_task(&task);
5354     jlong end = os::elapsed_counter();
5355 
5356     gclog_or_tty->print_cr("TimedTask %f ms (%f - %f)",
5357         TimeHelper::counter_to_millis(end - start),
5358         TimeHelper::counter_to_millis(start),
5359         TimeHelper::counter_to_millis(end));
5360   }
5361 
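The loop above (new lines 5350-5360) dispatches an empty gang task NewCodeParameter times and logs the wall time of each round via os::elapsed_counter() and TimeHelper::counter_to_millis(); since TimedTask::work() does nothing, what it measures is the fixed overhead of handing a task to the work gang and waiting for all workers to complete. A minimal standalone sketch of the same measurement idea follows; it is not HotSpot code, the names (no_op_worker, the iteration count of 5 standing in for NewCodeParameter) are made up, and unlike HotSpot's work gang, which reuses a fixed set of worker threads, this sketch spawns fresh threads each round and therefore also pays thread-creation cost.

    #include <chrono>
    #include <cstdio>
    #include <thread>
    #include <vector>

    // Mirrors TimedTask::work(): a worker body that does nothing.
    static void no_op_worker(unsigned /*worker_id*/) {
    }

    int main() {
      const unsigned n_workers = std::thread::hardware_concurrency();
      for (int i = 0; i < 5; i++) {  // 5 stands in for NewCodeParameter
        auto start = std::chrono::steady_clock::now();
        // Dispatch the no-op task to every worker and wait for all of them,
        // analogous to workers()->run_task(&task).
        std::vector<std::thread> gang;
        for (unsigned w = 0; w < n_workers; w++) {
          gang.emplace_back(no_op_worker, w);
        }
        for (std::thread& t : gang) {
          t.join();
        }
        auto end = std::chrono::steady_clock::now();
        double ms = std::chrono::duration<double, std::milli>(end - start).count();
        std::printf("no-op gang dispatch: %f ms\n", ms);
      }
      return 0;
    }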
5362   {
5363     G1RootProcessor root_processor(this, n_workers);
5364     G1ParTask g1_par_task(this, _task_queues, &root_processor, n_workers);
5365     // InitialMark needs claim bits to keep track of the marked-through CLDs.
5366     if (g1_policy()->during_initial_mark_pause()) {
5367       ClassLoaderDataGraph::clear_claimed_marks();
5368     }
5369 
5370     // The individual threads will set their evac-failure closures.
5371     if (PrintTerminationStats) G1ParScanThreadState::print_termination_stats_hdr();
5372 
5373     workers()->run_task(&g1_par_task);
5374     end_par_time_sec = os::elapsedTime();
5375 
5376     // Closing the inner scope will execute the destructor
5377     // for the G1RootProcessor object. We record the current
5378     // elapsed time before closing the scope so that time
5379     // taken for the destructor is NOT included in the
5380     // reported parallel time.
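The closing comment describes a common RAII timing pattern: end_par_time_sec is captured while the G1RootProcessor is still alive, so whatever its destructor does when the inner scope closes is excluded from the reported parallel time. A minimal sketch of that pattern, with hypothetical names (ExpensiveTeardown stands in for G1RootProcessor; this is an illustration, not HotSpot code):

    #include <chrono>
    #include <cstdio>

    struct ExpensiveTeardown {
      ~ExpensiveTeardown() {
        // Imagine costly cleanup here; it must not be charged
        // to the measured parallel phase.
      }
    };

    int main() {
      double end_sec;
      auto t0 = std::chrono::steady_clock::now();
      {
        ExpensiveTeardown guard;  // plays the role of G1RootProcessor
        // ... parallel work would run here ...
        // Capture the end time BEFORE the scope closes, so the
        // destructor below runs outside the measured interval.
        end_sec = std::chrono::duration<double>(
            std::chrono::steady_clock::now() - t0).count();
      }  // ~ExpensiveTeardown() runs here, after end_sec is captured
      std::printf("reported parallel time: %f s\n", end_sec);
      return 0;
    }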