
src/hotspot/share/gc/g1/g1CollectedHeap.cpp




--- old/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
1766   G1StringDedup::initialize();
1767 
1768   _preserved_marks_set.init(ParallelGCThreads);
1769 
1770   _collection_set.initialize(max_regions());
1771 
1772   return JNI_OK;
1773 }
1774 
1775 void G1CollectedHeap::initialize_serviceability() {
1776   _eden_pool = new G1EdenPool(this);
1777   _survivor_pool = new G1SurvivorPool(this);
1778   _old_pool = new G1OldGenPool(this);
1779 
1780   _full_gc_memory_manager.add_pool(_eden_pool);
1781   _full_gc_memory_manager.add_pool(_survivor_pool);
1782   _full_gc_memory_manager.add_pool(_old_pool);
1783 
1784   _memory_manager.add_pool(_eden_pool);
1785   _memory_manager.add_pool(_survivor_pool);
1786 
1787 }
1788 
1789 void G1CollectedHeap::stop() {
1790   // Stop all concurrent threads. We do this to make sure these threads
1791   // do not continue to execute and access resources (e.g. logging)
1792   // that are destroyed during shutdown.
1793   _cr->stop();
1794   _young_gen_sampling_thread->stop();
1795   _cmThread->stop();
1796   if (G1StringDedup::is_enabled()) {
1797     G1StringDedup::stop();
1798   }
1799 }
1800 
1801 void G1CollectedHeap::safepoint_synchronize_begin() {
1802   SuspendibleThreadSet::synchronize();
1803 }
1804 
1805 void G1CollectedHeap::safepoint_synchronize_end() {
1806   SuspendibleThreadSet::desynchronize();


2897     FormatBuffer<> gc_string("Pause ");
2898     if (collector_state()->during_initial_mark_pause()) {
2899       gc_string.append("Initial Mark");
2900       verify_type = G1HeapVerifier::G1VerifyInitialMark;
2901     } else if (collector_state()->gcs_are_young()) {
2902       gc_string.append("Young");
2903       verify_type = G1HeapVerifier::G1VerifyYoungOnly;
2904     } else {
2905       gc_string.append("Mixed");
2906       verify_type = G1HeapVerifier::G1VerifyMixed;
2907     }
2908     GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
2909 
2910     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
2911                                                                   workers()->active_workers(),
2912                                                                   Threads::number_of_non_daemon_threads());
2913     workers()->update_active_workers(active_workers);
2914     log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
2915 
2916     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
2917     TraceMemoryManagerStats tms(&_memory_manager, gc_cause());
2918 
2919     // If the secondary_free_list is not empty, append it to the
2920     // free_list. No need to wait for the cleanup operation to finish;
2921     // the region allocation code will check the secondary_free_list
2922     // and wait if necessary. If the G1StressConcRegionFreeing flag is
2923     // set, skip this step so that the region allocation code has to
2924     // get entries from the secondary_free_list.
2925     if (!G1StressConcRegionFreeing) {
2926       append_secondary_free_list_if_not_empty_with_lock();
2927     }
2928 
2929     G1HeapTransition heap_transition(this);
2930     size_t heap_used_bytes_before_gc = used();
2931 
2932     // Don't dynamically change the number of GC threads this early.  A value of
2933     // 0 is used to indicate serial work.  When parallel work is done,
2934     // it will be set.
2935 
2936     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
2937       IsGCActiveMark x;


+++ new/src/hotspot/share/gc/g1/g1CollectedHeap.cpp

1766   G1StringDedup::initialize();
1767 
1768   _preserved_marks_set.init(ParallelGCThreads);
1769 
1770   _collection_set.initialize(max_regions());
1771 
1772   return JNI_OK;
1773 }
1774 
1775 void G1CollectedHeap::initialize_serviceability() {
1776   _eden_pool = new G1EdenPool(this);
1777   _survivor_pool = new G1SurvivorPool(this);
1778   _old_pool = new G1OldGenPool(this);
1779 
1780   _full_gc_memory_manager.add_pool(_eden_pool);
1781   _full_gc_memory_manager.add_pool(_survivor_pool);
1782   _full_gc_memory_manager.add_pool(_old_pool);
1783 
1784   _memory_manager.add_pool(_eden_pool);
1785   _memory_manager.add_pool(_survivor_pool);
1786   _memory_manager.add_pool(_old_pool, false /* always_affected_by_gc */);
1787 }
1788 
1789 void G1CollectedHeap::stop() {
1790   // Stop all concurrent threads. We do this to make sure these threads
1791   // do not continue to execute and access resources (e.g. logging)
1792   // that are destroyed during shutdown.
1793   _cr->stop();
1794   _young_gen_sampling_thread->stop();
1795   _cmThread->stop();
1796   if (G1StringDedup::is_enabled()) {
1797     G1StringDedup::stop();
1798   }
1799 }
1800 
1801 void G1CollectedHeap::safepoint_synchronize_begin() {
1802   SuspendibleThreadSet::synchronize();
1803 }
1804 
1805 void G1CollectedHeap::safepoint_synchronize_end() {
1806   SuspendibleThreadSet::desynchronize();

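The safepoint_synchronize_begin/end hooks above delegate to SuspendibleThreadSet: before a safepoint the VM thread calls synchronize() to park G1's concurrent threads at their next yield point, and desynchronize() releases them afterwards. The following standalone sketch models that handshake; ToySuspendibleThreadSet and its members are invented for illustration and are not HotSpot's implementation.

  // Toy model of the SuspendibleThreadSet handshake (invented names,
  // not HotSpot code). Concurrent threads poll yield_point(); the VM
  // thread brackets a safepoint with synchronize()/desynchronize().
  #include <condition_variable>
  #include <mutex>

  class ToySuspendibleThreadSet {
    std::mutex _lock;
    std::condition_variable _cv;
    int _joined = 0;     // threads participating in the protocol
    int _parked = 0;     // threads currently blocked in yield_point()
    bool _sync = false;  // true between synchronize() and desynchronize()

  public:
    void join()  { std::lock_guard<std::mutex> g(_lock); _joined++; }
    void leave() { std::lock_guard<std::mutex> g(_lock); _joined--; _cv.notify_all(); }

    // Called periodically by concurrent threads at points where it is
    // safe to pause; blocks while a safepoint is in progress.
    void yield_point() {
      std::unique_lock<std::mutex> g(_lock);
      if (!_sync) return;
      _parked++;
      _cv.notify_all();                        // may complete synchronize()
      _cv.wait(g, [this] { return !_sync; });  // wait out the safepoint
      _parked--;
    }

    // VM thread: begin the safepoint once every joined thread has parked.
    void synchronize() {
      std::unique_lock<std::mutex> g(_lock);
      _sync = true;
      _cv.wait(g, [this] { return _parked == _joined; });
    }

    // VM thread: the safepoint is over, release the parked threads.
    void desynchronize() {
      std::lock_guard<std::mutex> g(_lock);
      _sync = false;
      _cv.notify_all();
    }
  };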

2897     FormatBuffer<> gc_string("Pause ");
2898     if (collector_state()->during_initial_mark_pause()) {
2899       gc_string.append("Initial Mark");
2900       verify_type = G1HeapVerifier::G1VerifyInitialMark;
2901     } else if (collector_state()->gcs_are_young()) {
2902       gc_string.append("Young");
2903       verify_type = G1HeapVerifier::G1VerifyYoungOnly;
2904     } else {
2905       gc_string.append("Mixed");
2906       verify_type = G1HeapVerifier::G1VerifyMixed;
2907     }
2908     GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
2909 
2910     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
2911                                                                   workers()->active_workers(),
2912                                                                   Threads::number_of_non_daemon_threads());
2913     workers()->update_active_workers(active_workers);
2914     log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
2915 
2916     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
2917     TraceMemoryManagerStats tms(&_memory_manager, gc_cause(), collector_state()->yc_type() == Mixed /* allMemoryPoolsAffected */);
2918 
2919     // If the secondary_free_list is not empty, append it to the
2920     // free_list. No need to wait for the cleanup operation to finish;
2921     // the region allocation code will check the secondary_free_list
2922     // and wait if necessary. If the G1StressConcRegionFreeing flag is
2923     // set, skip this step so that the region allocation code has to
2924     // get entries from the secondary_free_list.
2925     if (!G1StressConcRegionFreeing) {
2926       append_secondary_free_list_if_not_empty_with_lock();
2927     }
2928 
2929     G1HeapTransition heap_transition(this);
2930     size_t heap_used_bytes_before_gc = used();
2931 
2932     // Don't dynamically change the number of GC threads this early.  A value of
2933     // 0 is used to indicate serial work.  When parallel work is done,
2934     // it will be set.
2935 
2936     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
2937       IsGCActiveMark x;

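The two changed lines in this change work as a pair: initialize_serviceability() now registers _old_pool with the incremental memory manager as not always affected by GC, and each pause constructs TraceMemoryManagerStats with allMemoryPoolsAffected set only when collector_state()->yc_type() == Mixed. The intended effect is that the old-gen pool's CollectionUsage is refreshed by mixed pauses, which actually evacuate old regions, but not by young-only pauses. A minimal standalone sketch of that gating follows; ToyPool and ToyGCMemoryManager are invented names, not HotSpot's GCMemoryManager API.

  // Toy model of pool registration with an always_affected_by_gc flag.
  // A pool registered with the flag false only has its collection-usage
  // snapshot refreshed when a GC reports that all pools were affected.
  #include <cstdio>
  #include <string>
  #include <utility>
  #include <vector>

  struct ToyPool {
    std::string name;
    size_t used = 0;             // live data right now
    size_t collection_used = 0;  // snapshot taken after a relevant GC
  };

  class ToyGCMemoryManager {
    std::vector<std::pair<ToyPool*, bool>> _pools;  // pool, always_affected
  public:
    void add_pool(ToyPool* p, bool always_affected_by_gc = true) {
      _pools.emplace_back(p, always_affected_by_gc);
    }
    void gc_end(bool all_memory_pools_affected) {
      for (auto& e : _pools) {
        if (e.second || all_memory_pools_affected) {
          e.first->collection_used = e.first->used;  // refresh CollectionUsage
        }
      }
    }
  };

  int main() {
    ToyPool eden{"eden"}, survivor{"survivor"}, old_gen{"old"};
    ToyGCMemoryManager young_mgr;
    young_mgr.add_pool(&eden);
    young_mgr.add_pool(&survivor);
    young_mgr.add_pool(&old_gen, false /* always_affected_by_gc */);

    old_gen.used = 100;
    young_mgr.gc_end(false /* young-only pause: old pool untouched */);
    std::printf("after young: old collection_used = %zu\n", old_gen.collection_used);

    young_mgr.gc_end(true /* mixed pause: every pool affected */);
    std::printf("after mixed: old collection_used = %zu\n", old_gen.collection_used);
    return 0;
  }

Run as-is, the first line prints 0 (the young-only pause leaves the old pool's snapshot alone) and the second prints 100 (the mixed pause refreshes it).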
