< prev index next >

src/hotspot/share/gc/g1/g1CollectedHeap.cpp

Print this page
rev 48000 : [mq]: open.patch


  27 #include "classfile/stringTable.hpp"
  28 #include "classfile/symbolTable.hpp"
  29 #include "code/codeCache.hpp"
  30 #include "code/icBuffer.hpp"
  31 #include "gc/g1/bufferingOopClosure.hpp"
  32 #include "gc/g1/concurrentMarkThread.inline.hpp"
  33 #include "gc/g1/g1Allocator.inline.hpp"
  34 #include "gc/g1/g1CollectedHeap.inline.hpp"
  35 #include "gc/g1/g1CollectionSet.hpp"
  36 #include "gc/g1/g1CollectorPolicy.hpp"
  37 #include "gc/g1/g1CollectorState.hpp"
  38 #include "gc/g1/g1ConcurrentRefine.hpp"
  39 #include "gc/g1/g1ConcurrentRefineThread.hpp"
  40 #include "gc/g1/g1EvacStats.inline.hpp"
  41 #include "gc/g1/g1FullCollector.hpp"
  42 #include "gc/g1/g1GCPhaseTimes.hpp"
  43 #include "gc/g1/g1HeapSizingPolicy.hpp"
  44 #include "gc/g1/g1HeapTransition.hpp"
  45 #include "gc/g1/g1HeapVerifier.hpp"
  46 #include "gc/g1/g1HotCardCache.hpp"

  47 #include "gc/g1/g1OopClosures.inline.hpp"
  48 #include "gc/g1/g1ParScanThreadState.inline.hpp"
  49 #include "gc/g1/g1Policy.hpp"
  50 #include "gc/g1/g1RegionToSpaceMapper.hpp"
  51 #include "gc/g1/g1RemSet.hpp"
  52 #include "gc/g1/g1RootClosures.hpp"
  53 #include "gc/g1/g1RootProcessor.hpp"
  54 #include "gc/g1/g1StringDedup.hpp"
  55 #include "gc/g1/g1YCTypes.hpp"
  56 #include "gc/g1/g1YoungRemSetSamplingThread.hpp"
  57 #include "gc/g1/heapRegion.inline.hpp"
  58 #include "gc/g1/heapRegionRemSet.hpp"
  59 #include "gc/g1/heapRegionSet.inline.hpp"
  60 #include "gc/g1/vm_operations_g1.hpp"
  61 #include "gc/shared/gcHeapSummary.hpp"
  62 #include "gc/shared/gcId.hpp"
  63 #include "gc/shared/gcLocker.inline.hpp"
  64 #include "gc/shared/gcTimer.hpp"
  65 #include "gc/shared/gcTrace.hpp"
  66 #include "gc/shared/gcTraceTime.inline.hpp"


1212   heap_transition->print();
1213   print_heap_after_gc();
1214   print_heap_regions();
1215 #ifdef TRACESPINNING
1216   ParallelTaskTerminator::print_termination_counts();
1217 #endif
1218 }
1219 
     // Runs a full (stop-the-world) collection at a safepoint. Returns false
     // when the GC locker is active and the collection could not be run,
     // true when the full collection completed.
1220 bool G1CollectedHeap::do_full_collection(bool explicit_gc,
1221                                          bool clear_all_soft_refs) {
1222   assert_at_safepoint(true /* should_be_vm_thread */);
1223 
1224   if (GCLocker::check_active_before_gc()) {
1225     // Full GC was not completed.
1226     return false;
1227   }
1228 
     // Clear soft refs if the caller asked for it, or if the collector
     // policy decides they must be cleared (e.g. after a failed allocation).
1229   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1230       collector_policy()->should_clear_all_soft_refs();
1231 
1232   G1FullCollector collector(this, explicit_gc, do_clear_all_soft_refs);
     // Scope the "Pause Full" log/timer around prepare/collect/complete.
1233   GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
1234 
1235   collector.prepare_collection();
1236   collector.collect();
1237   collector.complete_collection();
1238 
1239   // Full collection was successfully completed.
1240   return true;
1241 }
1242 
     // CollectedHeap API entry point: always treated as an explicit GC.
1243 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1244   // Currently, there is no facility in the do_full_collection(bool) API to notify
1245   // the caller that the collection did not succeed (e.g., because it was locked
1246   // out by the GC locker). So, right now, we'll ignore the return value.
1247   bool dummy = do_full_collection(true,                /* explicit_gc */
1248                                   clear_all_soft_refs);
1249 }
1250 
1251 void G1CollectedHeap::resize_if_necessary_after_full_collection() {
1252   // Capacity, free and used after the GC counted as full regions to


1509   // below will make sure of that and do any remaining clean up.
1510   _allocator->abandon_gc_alloc_regions();
1511 
1512   // Instead of tearing down / rebuilding the free lists here, we
1513   // could instead use the remove_all_pending() method on free_list to
1514   // remove only the ones that we need to remove.
1515   tear_down_region_sets(true /* free_list_only */);
1516   shrink_helper(shrink_bytes);
1517   rebuild_region_sets(true /* free_list_only */);
1518 
1519   _hrm.verify_optional();
1520   _verifier->verify_region_sets_optional();
1521 }
1522 
1523 // Public methods.
1524 
1525 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
1526   CollectedHeap(),
1527   _young_gen_sampling_thread(NULL),
1528   _collector_policy(collector_policy),





1529   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1530   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1531   _g1_policy(create_g1_policy(_gc_timer_stw)),
1532   _collection_set(this, _g1_policy),
1533   _dirty_card_queue_set(false),
1534   _is_alive_closure_cm(this),
1535   _is_alive_closure_stw(this),
1536   _ref_processor_cm(NULL),
1537   _ref_processor_stw(NULL),
1538   _bot(NULL),
1539   _hot_card_cache(NULL),
1540   _g1_rem_set(NULL),
1541   _cr(NULL),
1542   _g1mm(NULL),
1543   _preserved_marks_set(true /* in_c_heap */),
1544   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1545   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1546   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1547   _humongous_reclaim_candidates(),
1548   _has_humongous_reclaim_candidates(false),


1804 
1805   // Here we allocate the dummy HeapRegion that is required by the
1806   // G1AllocRegion class.
1807   HeapRegion* dummy_region = _hrm.get_dummy_region();
1808 
1809   // We'll re-use the same region whether the alloc region will
1810   // require BOT updates or not and, if it doesn't, then a non-young
1811   // region will complain that it cannot support allocations without
1812   // BOT updates. So we'll tag the dummy region as eden to avoid that.
1813   dummy_region->set_eden();
1814   // Make sure it's full.
1815   dummy_region->set_top(dummy_region->end());
1816   G1AllocRegion::setup(this, dummy_region);
1817 
1818   _allocator->init_mutator_alloc_region();
1819 
1820   // Do create of the monitoring and management support so that
1821   // values in the heap have been properly initialized.
1822   _g1mm = new G1MonitoringSupport(this);
1823 











1824   G1StringDedup::initialize();
1825 
1826   _preserved_marks_set.init(ParallelGCThreads);
1827 
1828   _collection_set.initialize(max_regions());
1829 
1830   return JNI_OK;
1831 }
1832 
1833 void G1CollectedHeap::stop() {
1834   // Stop all concurrent threads. We do this to make sure these threads
1835   // do not continue to execute and access resources (e.g. logging)
1836   // that are destroyed during shutdown.
     // Order: refinement threads, the young-gen remset sampling thread, the
     // concurrent mark thread, then (if enabled) the string dedup threads.
1837   _cr->stop();
1838   _young_gen_sampling_thread->stop();
1839   _cmThread->stop();
1840   if (G1StringDedup::is_enabled()) {
1841     G1StringDedup::stop();
1842   }
1843 }


2937 
2938     GCTraceCPUTime tcpu;
2939 
2940     FormatBuffer<> gc_string("Pause ");
2941     if (collector_state()->during_initial_mark_pause()) {
2942       gc_string.append("Initial Mark");
2943     } else if (collector_state()->gcs_are_young()) {
2944       gc_string.append("Young");
2945     } else {
2946       gc_string.append("Mixed");
2947     }
2948     GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
2949 
2950     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
2951                                                                   workers()->active_workers(),
2952                                                                   Threads::number_of_non_daemon_threads());
2953     workers()->update_active_workers(active_workers);
2954     log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
2955 
2956     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
2957     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
2958 
2959     // If the secondary_free_list is not empty, append it to the
2960     // free_list. No need to wait for the cleanup operation to finish;
2961     // the region allocation code will check the secondary_free_list
2962     // and wait if necessary. If the G1StressConcRegionFreeing flag is
2963     // set, skip this step so that the region allocation code has to
2964     // get entries from the secondary_free_list.
2965     if (!G1StressConcRegionFreeing) {
2966       append_secondary_free_list_if_not_empty_with_lock();
2967     }
2968 
2969     G1HeapTransition heap_transition(this);
2970     size_t heap_used_bytes_before_gc = used();
2971 
2972     // Don't dynamically change the number of GC threads this early.  A value of
2973     // 0 is used to indicate serial work.  When parallel work is done,
2974     // it will be set.
2975 
2976     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
2977       IsGCActiveMark x;


5350 
5351 public:
     // Stashes the heap so do_code_blob() can register nmethods with it.
5352   RebuildStrongCodeRootClosure(G1CollectedHeap* g1h) :
5353     _g1h(g1h) {}
5354 
     // Invoked for every code blob in the code cache; registers each
     // nmethod with the heap (when ScavengeRootsInCode is set) so its
     // strong code roots are rebuilt. Non-nmethod blobs are skipped.
5355   void do_code_blob(CodeBlob* cb) {
5356     nmethod* nm = (cb != NULL) ? cb->as_nmethod_or_null() : NULL;
5357     if (nm == NULL) {
5358       return;
5359     }
5360 
5361     if (ScavengeRootsInCode) {
5362       _g1h->register_nmethod(nm);
5363     }
5364   }
5365 };
5366 
     // Walks the whole code cache and re-registers every nmethod with the
     // heap, rebuilding the per-region strong code root sets.
5367 void G1CollectedHeap::rebuild_strong_code_roots() {
5368   RebuildStrongCodeRootClosure blob_cl(this);
5369   CodeCache::blobs_do(&blob_cl);















5370 }


  27 #include "classfile/stringTable.hpp"
  28 #include "classfile/symbolTable.hpp"
  29 #include "code/codeCache.hpp"
  30 #include "code/icBuffer.hpp"
  31 #include "gc/g1/bufferingOopClosure.hpp"
  32 #include "gc/g1/concurrentMarkThread.inline.hpp"
  33 #include "gc/g1/g1Allocator.inline.hpp"
  34 #include "gc/g1/g1CollectedHeap.inline.hpp"
  35 #include "gc/g1/g1CollectionSet.hpp"
  36 #include "gc/g1/g1CollectorPolicy.hpp"
  37 #include "gc/g1/g1CollectorState.hpp"
  38 #include "gc/g1/g1ConcurrentRefine.hpp"
  39 #include "gc/g1/g1ConcurrentRefineThread.hpp"
  40 #include "gc/g1/g1EvacStats.inline.hpp"
  41 #include "gc/g1/g1FullCollector.hpp"
  42 #include "gc/g1/g1GCPhaseTimes.hpp"
  43 #include "gc/g1/g1HeapSizingPolicy.hpp"
  44 #include "gc/g1/g1HeapTransition.hpp"
  45 #include "gc/g1/g1HeapVerifier.hpp"
  46 #include "gc/g1/g1HotCardCache.hpp"
  47 #include "gc/g1/g1MemoryPool.hpp"
  48 #include "gc/g1/g1OopClosures.inline.hpp"
  49 #include "gc/g1/g1ParScanThreadState.inline.hpp"
  50 #include "gc/g1/g1Policy.hpp"
  51 #include "gc/g1/g1RegionToSpaceMapper.hpp"
  52 #include "gc/g1/g1RemSet.hpp"
  53 #include "gc/g1/g1RootClosures.hpp"
  54 #include "gc/g1/g1RootProcessor.hpp"
  55 #include "gc/g1/g1StringDedup.hpp"
  56 #include "gc/g1/g1YCTypes.hpp"
  57 #include "gc/g1/g1YoungRemSetSamplingThread.hpp"
  58 #include "gc/g1/heapRegion.inline.hpp"
  59 #include "gc/g1/heapRegionRemSet.hpp"
  60 #include "gc/g1/heapRegionSet.inline.hpp"
  61 #include "gc/g1/vm_operations_g1.hpp"
  62 #include "gc/shared/gcHeapSummary.hpp"
  63 #include "gc/shared/gcId.hpp"
  64 #include "gc/shared/gcLocker.inline.hpp"
  65 #include "gc/shared/gcTimer.hpp"
  66 #include "gc/shared/gcTrace.hpp"
  67 #include "gc/shared/gcTraceTime.inline.hpp"


1213   heap_transition->print();
1214   print_heap_after_gc();
1215   print_heap_regions();
1216 #ifdef TRACESPINNING
1217   ParallelTaskTerminator::print_termination_counts();
1218 #endif
1219 }
1220 
     // Runs a full (stop-the-world) collection at a safepoint. Returns false
     // when the GC locker is active and the collection could not be run,
     // true when the full collection completed.
1221 bool G1CollectedHeap::do_full_collection(bool explicit_gc,
1222                                          bool clear_all_soft_refs) {
1223   assert_at_safepoint(true /* should_be_vm_thread */);
1224 
1225   if (GCLocker::check_active_before_gc()) {
1226     // Full GC was not completed.
1227     return false;
1228   }
1229 
     // Clear soft refs if the caller asked for it, or if the collector
     // policy decides they must be cleared (e.g. after a failed allocation).
1230   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1231       collector_policy()->should_clear_all_soft_refs();
1232 
     // New in this revision: the full collector is handed the full-GC
     // (old generation) memory manager so the pause is reported against it.
1233   G1FullCollector collector(this, &_full_gc_memory_manager, explicit_gc, do_clear_all_soft_refs);
1234   GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
1235 
1236   collector.prepare_collection();
1237   collector.collect();
1238   collector.complete_collection();
1239 
1240   // Full collection was successfully completed.
1241   return true;
1242 }
1243 
     // CollectedHeap API entry point: always treated as an explicit GC.
1244 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1245   // Currently, there is no facility in the do_full_collection(bool) API to notify
1246   // the caller that the collection did not succeed (e.g., because it was locked
1247   // out by the GC locker). So, right now, we'll ignore the return value.
1248   bool dummy = do_full_collection(true,                /* explicit_gc */
1249                                   clear_all_soft_refs);
1250 }
1251 
1252 void G1CollectedHeap::resize_if_necessary_after_full_collection() {
1253   // Capacity, free and used after the GC counted as full regions to


1510   // below will make sure of that and do any remaining clean up.
1511   _allocator->abandon_gc_alloc_regions();
1512 
1513   // Instead of tearing down / rebuilding the free lists here, we
1514   // could instead use the remove_all_pending() method on free_list to
1515   // remove only the ones that we need to remove.
1516   tear_down_region_sets(true /* free_list_only */);
1517   shrink_helper(shrink_bytes);
1518   rebuild_region_sets(true /* free_list_only */);
1519 
1520   _hrm.verify_optional();
1521   _verifier->verify_region_sets_optional();
1522 }
1523 
1524 // Public methods.
1525 
1526 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
1527   CollectedHeap(),
1528   _young_gen_sampling_thread(NULL),
1529   _collector_policy(collector_policy),
1530   _memory_manager("G1 Young Generation", "end of minor GC"),
1531   _full_gc_memory_manager("G1 Old Generation", "end of major GC"),
1532   _eden_pool(NULL),
1533   _survivor_pool(NULL),
1534   _old_pool(NULL),
1535   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1536   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1537   _g1_policy(create_g1_policy(_gc_timer_stw)),
1538   _collection_set(this, _g1_policy),
1539   _dirty_card_queue_set(false),
1540   _is_alive_closure_cm(this),
1541   _is_alive_closure_stw(this),
1542   _ref_processor_cm(NULL),
1543   _ref_processor_stw(NULL),
1544   _bot(NULL),
1545   _hot_card_cache(NULL),
1546   _g1_rem_set(NULL),
1547   _cr(NULL),
1548   _g1mm(NULL),
1549   _preserved_marks_set(true /* in_c_heap */),
1550   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1551   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1552   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1553   _humongous_reclaim_candidates(),
1554   _has_humongous_reclaim_candidates(false),


1810 
1811   // Here we allocate the dummy HeapRegion that is required by the
1812   // G1AllocRegion class.
1813   HeapRegion* dummy_region = _hrm.get_dummy_region();
1814 
1815   // We'll re-use the same region whether the alloc region will
1816   // require BOT updates or not and, if it doesn't, then a non-young
1817   // region will complain that it cannot support allocations without
1818   // BOT updates. So we'll tag the dummy region as eden to avoid that.
1819   dummy_region->set_eden();
1820   // Make sure it's full.
1821   dummy_region->set_top(dummy_region->end());
1822   G1AllocRegion::setup(this, dummy_region);
1823 
1824   _allocator->init_mutator_alloc_region();
1825 
1826   // Do create of the monitoring and management support so that
1827   // values in the heap have been properly initialized.
1828   _g1mm = new G1MonitoringSupport(this);
1829 
1830   _eden_pool = new G1EdenPool(this);
1831   _survivor_pool = new G1SurvivorPool(this);
1832   _old_pool = new G1OldGenPool(this);
1833 
1834   _full_gc_memory_manager.add_pool(_eden_pool);
1835   _full_gc_memory_manager.add_pool(_survivor_pool);
1836   _full_gc_memory_manager.add_pool(_old_pool);
1837 
1838   _memory_manager.add_pool(_eden_pool);
1839   _memory_manager.add_pool(_survivor_pool);
1840 
1841   G1StringDedup::initialize();
1842 
1843   _preserved_marks_set.init(ParallelGCThreads);
1844 
1845   _collection_set.initialize(max_regions());
1846 
1847   return JNI_OK;
1848 }
1849 
1850 void G1CollectedHeap::stop() {
1851   // Stop all concurrent threads. We do this to make sure these threads
1852   // do not continue to execute and access resources (e.g. logging)
1853   // that are destroyed during shutdown.
     // Order: refinement threads, the young-gen remset sampling thread, the
     // concurrent mark thread, then (if enabled) the string dedup threads.
1854   _cr->stop();
1855   _young_gen_sampling_thread->stop();
1856   _cmThread->stop();
1857   if (G1StringDedup::is_enabled()) {
1858     G1StringDedup::stop();
1859   }
1860 }


2954 
2955     GCTraceCPUTime tcpu;
2956 
2957     FormatBuffer<> gc_string("Pause ");
2958     if (collector_state()->during_initial_mark_pause()) {
2959       gc_string.append("Initial Mark");
2960     } else if (collector_state()->gcs_are_young()) {
2961       gc_string.append("Young");
2962     } else {
2963       gc_string.append("Mixed");
2964     }
2965     GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
2966 
2967     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
2968                                                                   workers()->active_workers(),
2969                                                                   Threads::number_of_non_daemon_threads());
2970     workers()->update_active_workers(active_workers);
2971     log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
2972 
2973     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
2974     TraceMemoryManagerStats tms(&_memory_manager, gc_cause());
2975 
2976     // If the secondary_free_list is not empty, append it to the
2977     // free_list. No need to wait for the cleanup operation to finish;
2978     // the region allocation code will check the secondary_free_list
2979     // and wait if necessary. If the G1StressConcRegionFreeing flag is
2980     // set, skip this step so that the region allocation code has to
2981     // get entries from the secondary_free_list.
2982     if (!G1StressConcRegionFreeing) {
2983       append_secondary_free_list_if_not_empty_with_lock();
2984     }
2985 
2986     G1HeapTransition heap_transition(this);
2987     size_t heap_used_bytes_before_gc = used();
2988 
2989     // Don't dynamically change the number of GC threads this early.  A value of
2990     // 0 is used to indicate serial work.  When parallel work is done,
2991     // it will be set.
2992 
2993     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
2994       IsGCActiveMark x;


5367 
5368 public:
     // Stashes the heap so do_code_blob() can register nmethods with it.
5369   RebuildStrongCodeRootClosure(G1CollectedHeap* g1h) :
5370     _g1h(g1h) {}
5371 
     // Invoked for every code blob in the code cache; registers each
     // nmethod with the heap (when ScavengeRootsInCode is set) so its
     // strong code roots are rebuilt. Non-nmethod blobs are skipped.
5372   void do_code_blob(CodeBlob* cb) {
5373     nmethod* nm = (cb != NULL) ? cb->as_nmethod_or_null() : NULL;
5374     if (nm == NULL) {
5375       return;
5376     }
5377 
5378     if (ScavengeRootsInCode) {
5379       _g1h->register_nmethod(nm);
5380     }
5381   }
5382 };
5383 
     // Walks the whole code cache and re-registers every nmethod with the
     // heap, rebuilding the per-region strong code root sets.
5384 void G1CollectedHeap::rebuild_strong_code_roots() {
5385   RebuildStrongCodeRootClosure blob_cl(this);
5386   CodeCache::blobs_do(&blob_cl);
5387 }
5388 
     // Returns the two GC memory managers exposed via java.lang.management:
     // the young-generation manager first, then the full-GC (old generation)
     // manager. Both managers are owned by this heap.
5389 GrowableArray<GCMemoryManager*> G1CollectedHeap::memory_managers() {
5390   GrowableArray<GCMemoryManager*> memory_managers(2);
5391   memory_managers.append(&_memory_manager);
5392   memory_managers.append(&_full_gc_memory_manager);
5393   return memory_managers;
5394 }
5395 
     // Returns the three G1 memory pools (eden, survivor, old) exposed via
     // java.lang.management. The pools are created during heap
     // initialization (see initialize(): _eden_pool/_survivor_pool/_old_pool).
5396 GrowableArray<MemoryPool*> G1CollectedHeap::memory_pools() {
5397   GrowableArray<MemoryPool*> memory_pools(3);
5398   memory_pools.append(_eden_pool);
5399   memory_pools.append(_survivor_pool);
5400   memory_pools.append(_old_pool);
5401   return memory_pools;
5402 }
< prev index next >