< prev index next >

src/share/vm/gc/g1/g1CollectedHeap.cpp

Print this page
rev 10309 : imported patch 8076463-add-logging-for-preserve-cm-tasks
rev 10310 : [mq]: 8150630-add-logging-for-merge-pss


4577 
4578     assert(rp->num_q() == n_workers, "sanity");
4579     assert(n_workers <= rp->max_num_q(), "sanity");
4580 
4581     G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, n_workers);
4582     rp->enqueue_discovered_references(&par_task_executor);
4583   }
4584 
4585   rp->verify_no_references_recorded();
4586   assert(!rp->discovery_enabled(), "should have been disabled");
4587 
4588   // FIXME
4589   // CM's reference processing also cleans up the string and symbol tables.
4590   // Should we do that here also? We could, but it is a serial operation
4591   // and could significantly increase the pause time.
4592 
4593   double ref_enq_time = os::elapsedTime() - ref_enq_start;
4594   g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
4595 }
4596 






// Per-pause setup run before evacuating the collection set:
// re-arm heap expansion on allocation failure, clear the
// evacuation-failure flag, quiesce the hot card cache for the
// pause, and let the remembered set prepare for processing oops
// into the collection set.
4597 void G1CollectedHeap::pre_evacuate_collection_set() {
4598   _expand_heap_after_alloc_failure = true;
4599   _evacuation_failed = false;
4600 
4601   // Disable the hot card cache (and reset its claimed index) so no
4602   // cards are cached/drained while the pause is in progress.
     G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
4603   hot_card_cache->reset_hot_cache_claimed_index();
4604   hot_card_cache->set_use_cache(false);
4605 
4606   g1_rem_set()->prepare_for_oops_into_collection_set_do();
4607 }
4608 
// Evacuate the collection set and run the post-evacuation phases
// visible below: enqueue remaining discovered references (or record a
// zero time), release GC allocation regions, flush per-thread scan
// states, record copy stats, resize PLABs, re-enable the hot card
// cache, purge code root memory and redirty logged cards.
// NOTE(review): the parallel evacuation body itself (webrev lines
// 4617-4678) is elided by this diff view.
4609 void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
4610   // Should G1EvacuationFailureALot be in effect for this GC?
4611   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
4612 
4613   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
4614   double start_par_time_sec = os::elapsedTime();
4615   double end_par_time_sec;
4616 


4679     // Note: the values are reset only when an actual
4680     // evacuation failure occurs.
4681     NOT_PRODUCT(reset_evacuation_should_fail();)
4682   }
4683 
4684   // Enqueue any references remaining on the STW
4685   // reference processor's discovered lists. We need to do
4686   // this after the card table is cleaned (and verified) as
4687   // the act of enqueueing entries on to the pending list
4688   // will log these updates (and dirty their associated
4689   // cards). We need these updates logged to update any
4690   // RSets.
4691   if (g1_policy()->should_process_references()) {
4692     enqueue_discovered_references(per_thread_states);
4693   } else {
4694     // Reference processing skipped: still record a (zero) phase time.
     g1_policy()->phase_times()->record_ref_enq_time(0);
4695   }
4696 
4697   _allocator->release_gc_alloc_regions(evacuation_info);
4698 
4699   per_thread_states->flush();
4700 
4701   record_obj_copy_mem_stats();
4702 
4703   // Adjust the desired PLAB sizes from this pause's evacuation stats.
     _survivor_evac_stats.adjust_desired_plab_sz();
4704   _old_evac_stats.adjust_desired_plab_sz();
4705 
4706   // Reset and re-enable the hot card cache.
4707   // Note the counts for the cards in the regions in the
4708   // collection set are reset when the collection set is freed.
4709   G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
4710   hot_card_cache->reset_hot_cache();
4711   hot_card_cache->set_use_cache(true);
4712 
4713   purge_code_root_memory();
4714 
4715   redirty_logged_cards();
4716 #if defined(COMPILER2) || INCLUDE_JVMCI
4717   DerivedPointerTable::update_pointers();
4718 #endif
4719 }




4577 
4578     assert(rp->num_q() == n_workers, "sanity");
4579     assert(n_workers <= rp->max_num_q(), "sanity");
4580 
4581     G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, n_workers);
4582     rp->enqueue_discovered_references(&par_task_executor);
4583   }
4584 
4585   rp->verify_no_references_recorded();
4586   assert(!rp->discovery_enabled(), "should have been disabled");
4587 
4588   // FIXME
4589   // CM's reference processing also cleans up the string and symbol tables.
4590   // Should we do that here also? We could, but it is a serial operation
4591   // and could significantly increase the pause time.
4592 
4593   double ref_enq_time = os::elapsedTime() - ref_enq_start;
4594   g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
4595 }
4596 
// Flush (merge) the per-worker G1ParScanThreadState data back into
// shared state, and record the elapsed wall-clock time of that merge
// (converted from seconds to milliseconds) as the "Merge PSS" entry
// in the pause phase times.
4597 void G1CollectedHeap::merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states) {
4598   double merge_pss_time_start = os::elapsedTime();
4599   per_thread_states->flush();
4600   g1_policy()->phase_times()->record_merge_pss_time_ms((os::elapsedTime() - merge_pss_time_start) * 1000.0);
4601 }
4602 
// Per-pause setup run before evacuating the collection set:
// re-arm heap expansion on allocation failure, clear the
// evacuation-failure flag, quiesce the hot card cache for the
// pause, and let the remembered set prepare for processing oops
// into the collection set.
4603 void G1CollectedHeap::pre_evacuate_collection_set() {
4604   _expand_heap_after_alloc_failure = true;
4605   _evacuation_failed = false;
4606 
4607   // Disable the hot card cache (and reset its claimed index) so no
4608   // cards are cached/drained while the pause is in progress.
     G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
4609   hot_card_cache->reset_hot_cache_claimed_index();
4610   hot_card_cache->set_use_cache(false);
4611 
4612   g1_rem_set()->prepare_for_oops_into_collection_set_do();
4613 }
4614 
// Evacuate the collection set and run the post-evacuation phases
// visible below: enqueue remaining discovered references (or record a
// zero time), release GC allocation regions, merge per-thread scan
// states (timed via merge_per_thread_state_info), record copy stats,
// resize PLABs, re-enable the hot card cache, purge code root memory
// and redirty logged cards.
// NOTE(review): the parallel evacuation body itself (webrev lines
// 4623-4684) is elided by this diff view.
4615 void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
4616   // Should G1EvacuationFailureALot be in effect for this GC?
4617   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
4618 
4619   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
4620   double start_par_time_sec = os::elapsedTime();
4621   double end_par_time_sec;
4622 


4685     // Note: the values are reset only when an actual
4686     // evacuation failure occurs.
4687     NOT_PRODUCT(reset_evacuation_should_fail();)
4688   }
4689 
4690   // Enqueue any references remaining on the STW
4691   // reference processor's discovered lists. We need to do
4692   // this after the card table is cleaned (and verified) as
4693   // the act of enqueueing entries on to the pending list
4694   // will log these updates (and dirty their associated
4695   // cards). We need these updates logged to update any
4696   // RSets.
4697   if (g1_policy()->should_process_references()) {
4698     enqueue_discovered_references(per_thread_states);
4699   } else {
4700     // Reference processing skipped: still record a (zero) phase time.
     g1_policy()->phase_times()->record_ref_enq_time(0);
4701   }
4702 
4703   _allocator->release_gc_alloc_regions(evacuation_info);
4704 
4705   merge_per_thread_state_info(per_thread_states);
4706 
4707   record_obj_copy_mem_stats();
4708 
4709   // Adjust the desired PLAB sizes from this pause's evacuation stats.
     _survivor_evac_stats.adjust_desired_plab_sz();
4710   _old_evac_stats.adjust_desired_plab_sz();
4711 
4712   // Reset and re-enable the hot card cache.
4713   // Note the counts for the cards in the regions in the
4714   // collection set are reset when the collection set is freed.
4715   G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
4716   hot_card_cache->reset_hot_cache();
4717   hot_card_cache->set_use_cache(true);
4718 
4719   purge_code_root_memory();
4720 
4721   redirty_logged_cards();
4722 #if defined(COMPILER2) || INCLUDE_JVMCI
4723   DerivedPointerTable::update_pointers();
4724 #endif
4725 }


< prev index next >