
src/hotspot/share/gc/g1/g1CollectedHeap.cpp

rev 48000 : [mq]: open.patch
rev 48001 : [mq]: 8191564-new.patch

--- Old version ---

1213   heap_transition->print();
1214   print_heap_after_gc();
1215   print_heap_regions();
1216 #ifdef TRACESPINNING
1217   ParallelTaskTerminator::print_termination_counts();
1218 #endif
1219 }
1220 
1221 bool G1CollectedHeap::do_full_collection(bool explicit_gc,
1222                                          bool clear_all_soft_refs) {
1223   assert_at_safepoint(true /* should_be_vm_thread */);
1224 
1225   if (GCLocker::check_active_before_gc()) {
1226     // Full GC was not completed.
1227     return false;
1228   }
1229 
1230   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1231       collector_policy()->should_clear_all_soft_refs();
1232 
1233   G1FullCollector collector(this, &_full_gc_mem_mgr, explicit_gc, do_clear_all_soft_refs);
1234   GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
1235 
1236   collector.prepare_collection();
1237   collector.collect();
1238   collector.complete_collection();
1239 
1240   // Full collection was successfully completed.
1241   return true;
1242 }
1243 
1244 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1245   // Currently, there is no facility in the do_full_collection(bool) API to notify
1246   // the caller that the collection did not succeed (e.g., because it was locked
1247   // out by the GC locker). So, right now, we'll ignore the return value.
1248   bool dummy = do_full_collection(true,                /* explicit_gc */
1249                                   clear_all_soft_refs);
1250 }
1251 
1252 void G1CollectedHeap::resize_if_necessary_after_full_collection() {
1253   // Capacity, free and used after the GC counted as full regions to


1510   // below will make sure of that and do any remaining clean up.
1511   _allocator->abandon_gc_alloc_regions();
1512 
1513   // Instead of tearing down / rebuilding the free lists here, we
1514   // could use the remove_all_pending() method on free_list to
1515   // remove only the entries we need to remove.
1516   tear_down_region_sets(true /* free_list_only */);
1517   shrink_helper(shrink_bytes);
1518   rebuild_region_sets(true /* free_list_only */);
1519 
1520   _hrm.verify_optional();
1521   _verifier->verify_region_sets_optional();
1522 }
1523 
1524 // Public methods.
1525 
1526 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
1527   CollectedHeap(),
1528   _young_gen_sampling_thread(NULL),
1529   _collector_policy(collector_policy),
1530   _mem_mgr("G1 Young Generation", "end of minor GC"),
1531   _full_gc_mem_mgr("G1 Old Generation", "end of major GC"),
1532   _eden_pool(NULL),
1533   _survivor_pool(NULL),
1534   _old_pool(NULL),
1535   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1536   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1537   _g1_policy(create_g1_policy(_gc_timer_stw)),
1538   _collection_set(this, _g1_policy),
1539   _dirty_card_queue_set(false),
1540   _is_alive_closure_cm(this),
1541   _is_alive_closure_stw(this),
1542   _ref_processor_cm(NULL),
1543   _ref_processor_stw(NULL),
1544   _bot(NULL),
1545   _hot_card_cache(NULL),
1546   _g1_rem_set(NULL),
1547   _cr(NULL),
1548   _g1mm(NULL),
1549   _preserved_marks_set(true /* in_c_heap */),
1550   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1551   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),


1814 
1815   // We'll reuse the same region whether or not the alloc region
1816   // requires BOT updates; if it doesn't, a non-young region will
1817   // complain that it cannot support allocations without BOT updates.
1818   // So we tag the dummy region as eden to avoid that.
1819   dummy_region->set_eden();
1820   // Make sure it's full.
1821   dummy_region->set_top(dummy_region->end());
1822   G1AllocRegion::setup(this, dummy_region);
1823 
1824   _allocator->init_mutator_alloc_region();
1825 
1826   // Create the monitoring and management support now that values
1827   // in the heap have been properly initialized.
1828   _g1mm = new G1MonitoringSupport(this);
1829 
1830   _eden_pool = new G1EdenPool(this);
1831   _survivor_pool = new G1SurvivorPool(this);
1832   _old_pool = new G1OldGenPool(this);
1833 
1834   _full_gc_mem_mgr.add_pool(_eden_pool);
1835   _full_gc_mem_mgr.add_pool(_survivor_pool);
1836   _full_gc_mem_mgr.add_pool(_old_pool);
1837 
1838   _mem_mgr.add_pool(_eden_pool);
1839   _mem_mgr.add_pool(_survivor_pool);
1840 
1841   G1StringDedup::initialize();
1842 
1843   _preserved_marks_set.init(ParallelGCThreads);
1844 
1845   _collection_set.initialize(max_regions());
1846 
1847   return JNI_OK;
1848 }
1849 
1850 void G1CollectedHeap::stop() {
1851   // Stop all concurrent threads. We do this to make sure these threads
1852   // do not continue to execute and access resources (e.g. logging)
1853   // that are destroyed during shutdown.
1854   _cr->stop();
1855   _young_gen_sampling_thread->stop();
1856   _cmThread->stop();
1857   if (G1StringDedup::is_enabled()) {
1858     G1StringDedup::stop();
1859   }


2954 
2955     GCTraceCPUTime tcpu;
2956 
2957     FormatBuffer<> gc_string("Pause ");
2958     if (collector_state()->during_initial_mark_pause()) {
2959       gc_string.append("Initial Mark");
2960     } else if (collector_state()->gcs_are_young()) {
2961       gc_string.append("Young");
2962     } else {
2963       gc_string.append("Mixed");
2964     }
2965     GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
2966 
2967     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
2968                                                                   workers()->active_workers(),
2969                                                                   Threads::number_of_non_daemon_threads());
2970     workers()->update_active_workers(active_workers);
2971     log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
2972 
2973     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
2974     TraceMemoryManagerStats tms(&_mem_mgr, gc_cause());
2975 
2976     // If the secondary_free_list is not empty, append it to the
2977     // free_list. No need to wait for the cleanup operation to finish;
2978     // the region allocation code will check the secondary_free_list
2979     // and wait if necessary. If the G1StressConcRegionFreeing flag is
2980     // set, skip this step so that the region allocation code has to
2981     // get entries from the secondary_free_list.
2982     if (!G1StressConcRegionFreeing) {
2983       append_secondary_free_list_if_not_empty_with_lock();
2984     }
2985 
2986     G1HeapTransition heap_transition(this);
2987     size_t heap_used_bytes_before_gc = used();
2988 
2989     // Don't dynamically change the number of GC threads this early.  A value of
2990     // 0 is used to indicate serial work.  When parallel work is done,
2991     // it will be set.
2992 
2993     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
2994       IsGCActiveMark x;


5371 
5372   void do_code_blob(CodeBlob* cb) {
5373     nmethod* nm = (cb != NULL) ? cb->as_nmethod_or_null() : NULL;
5374     if (nm == NULL) {
5375       return;
5376     }
5377 
5378     if (ScavengeRootsInCode) {
5379       _g1h->register_nmethod(nm);
5380     }
5381   }
5382 };
5383 
5384 void G1CollectedHeap::rebuild_strong_code_roots() {
5385   RebuildStrongCodeRootClosure blob_cl(this);
5386   CodeCache::blobs_do(&blob_cl);
5387 }
5388 
5389 GrowableArray<GCMemoryManager*> G1CollectedHeap::memory_managers() {
5390   GrowableArray<GCMemoryManager*> memory_managers(2);
5391   memory_managers.append(&_mem_mgr);
5392   memory_managers.append(&_full_gc_mem_mgr);
5393   return memory_managers;
5394 }
5395 
5396 GrowableArray<MemoryPool*> G1CollectedHeap::memory_pools() {
5397   GrowableArray<MemoryPool*> memory_pools(3);
5398   memory_pools.append(_eden_pool);
5399   memory_pools.append(_survivor_pool);
5400   memory_pools.append(_old_pool);
5401   return memory_pools;
5402 }

--- New version (after the 8191564 patch) ---

1213   heap_transition->print();
1214   print_heap_after_gc();
1215   print_heap_regions();
1216 #ifdef TRACESPINNING
1217   ParallelTaskTerminator::print_termination_counts();
1218 #endif
1219 }
1220 
1221 bool G1CollectedHeap::do_full_collection(bool explicit_gc,
1222                                          bool clear_all_soft_refs) {
1223   assert_at_safepoint(true /* should_be_vm_thread */);
1224 
1225   if (GCLocker::check_active_before_gc()) {
1226     // Full GC was not completed.
1227     return false;
1228   }
1229 
1230   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1231       collector_policy()->should_clear_all_soft_refs();
1232 
1233   G1FullCollector collector(this, &_full_gc_memory_manager, explicit_gc, do_clear_all_soft_refs);
1234   GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
1235 
1236   collector.prepare_collection();
1237   collector.collect();
1238   collector.complete_collection();
1239 
1240   // Full collection was successfully completed.
1241   return true;
1242 }
1243 
1244 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1245   // Currently, there is no facility in the do_full_collection(bool) API to notify
1246   // the caller that the collection did not succeed (e.g., because it was locked
1247   // out by the GC locker). So, right now, we'll ignore the return value.
1248   bool dummy = do_full_collection(true,                /* explicit_gc */
1249                                   clear_all_soft_refs);
1250 }
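
The shape of the code above is worth seeing outside the diff: a bool-returning entry point that bails out while the GC locker is active, a three-phase collection (prepare / collect / complete), and a void overload that has no way to propagate failure and so deliberately drops the result. Below is a minimal standalone sketch of that pattern; HeapSketch, locker_is_active and the phase bodies are hypothetical stand-ins, not G1 code.

#include <cstdio>

// Hypothetical stand-ins; only the control flow mirrors the G1 code above.
class HeapSketch {
public:
  // Mirrors do_full_collection(bool, bool): returns false when the
  // collection is locked out, true once it has run to completion.
  bool do_full_collection(bool explicit_gc, bool clear_all_soft_refs) {
    if (locker_is_active()) {
      return false;  // Full GC was not completed.
    }
    prepare_collection(explicit_gc, clear_all_soft_refs);
    collect();
    complete_collection();
    return true;
  }

  // Mirrors the void overload: the interface offers no way to report
  // failure, so the result is deliberately dropped.
  void do_full_collection(bool clear_all_soft_refs) {
    (void)do_full_collection(true /* explicit_gc */, clear_all_soft_refs);
  }

private:
  bool locker_is_active() const { return false; }  // stand-in for GCLocker
  void prepare_collection(bool, bool) { puts("prepare"); }
  void collect()                      { puts("collect"); }
  void complete_collection()          { puts("complete"); }
};

int main() {
  HeapSketch heap;
  heap.do_full_collection(false /* clear_all_soft_refs */);
  return 0;
}
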
1251 
1252 void G1CollectedHeap::resize_if_necessary_after_full_collection() {
1253   // Capacity, free and used after the GC counted as full regions to


1510   // below will make sure of that and do any remaining clean up.
1511   _allocator->abandon_gc_alloc_regions();
1512 
1513   // Instead of tearing down / rebuilding the free lists here, we
1514   // could use the remove_all_pending() method on free_list to
1515   // remove only the entries we need to remove.
1516   tear_down_region_sets(true /* free_list_only */);
1517   shrink_helper(shrink_bytes);
1518   rebuild_region_sets(true /* free_list_only */);
1519 
1520   _hrm.verify_optional();
1521   _verifier->verify_region_sets_optional();
1522 }
1523 
1524 // Public methods.
1525 
1526 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
1527   CollectedHeap(),
1528   _young_gen_sampling_thread(NULL),
1529   _collector_policy(collector_policy),
1530   _memory_manager("G1 Young Generation", "end of minor GC"),
1531   _full_gc_memory_manager("G1 Old Generation", "end of major GC"),
1532   _eden_pool(NULL),
1533   _survivor_pool(NULL),
1534   _old_pool(NULL),
1535   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1536   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1537   _g1_policy(create_g1_policy(_gc_timer_stw)),
1538   _collection_set(this, _g1_policy),
1539   _dirty_card_queue_set(false),
1540   _is_alive_closure_cm(this),
1541   _is_alive_closure_stw(this),
1542   _ref_processor_cm(NULL),
1543   _ref_processor_stw(NULL),
1544   _bot(NULL),
1545   _hot_card_cache(NULL),
1546   _g1_rem_set(NULL),
1547   _cr(NULL),
1548   _g1mm(NULL),
1549   _preserved_marks_set(true /* in_c_heap */),
1550   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1551   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),


1814 
1815   // We'll reuse the same region whether or not the alloc region
1816   // requires BOT updates; if it doesn't, a non-young region will
1817   // complain that it cannot support allocations without BOT updates.
1818   // So we tag the dummy region as eden to avoid that.
1819   dummy_region->set_eden();
1820   // Make sure it's full.
1821   dummy_region->set_top(dummy_region->end());
1822   G1AllocRegion::setup(this, dummy_region);
1823 
1824   _allocator->init_mutator_alloc_region();
1825 
1826   // Create the monitoring and management support now that values
1827   // in the heap have been properly initialized.
1828   _g1mm = new G1MonitoringSupport(this);
1829 
1830   _eden_pool = new G1EdenPool(this);
1831   _survivor_pool = new G1SurvivorPool(this);
1832   _old_pool = new G1OldGenPool(this);
1833 
1834   _full_gc_memory_manager.add_pool(_eden_pool);
1835   _full_gc_memory_manager.add_pool(_survivor_pool);
1836   _full_gc_memory_manager.add_pool(_old_pool);
1837 
1838   _memory_manager.add_pool(_eden_pool);
1839   _memory_manager.add_pool(_survivor_pool);
1840 
1841   G1StringDedup::initialize();
1842 
1843   _preserved_marks_set.init(ParallelGCThreads);
1844 
1845   _collection_set.initialize(max_regions());
1846 
1847   return JNI_OK;
1848 }
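
The pool wiring above is the heart of this patch: the full-GC manager is registered with all three pools, while the young-generation manager gets only eden and survivor, so each manager reports against the pools its collections can actually affect (the evacuation-pause code further down then passes _memory_manager to TraceMemoryManagerStats). Here is a minimal standalone sketch of that registration pattern; PoolSketch and ManagerSketch are hypothetical stand-ins for HotSpot's MemoryPool and GCMemoryManager.

#include <cstdio>
#include <initializer_list>
#include <string>
#include <vector>

// Hypothetical stand-ins illustrating only the registration pattern.
struct PoolSketch {
  std::string name;
};

struct ManagerSketch {
  std::string name;
  std::vector<const PoolSketch*> pools;
  void add_pool(const PoolSketch* p) { pools.push_back(p); }
};

int main() {
  PoolSketch eden{"Eden"}, survivor{"Survivor"}, old_gen{"Old"};

  // A full collection can move data in eden, survivor and old regions
  // alike, so the full-GC manager covers every pool.
  ManagerSketch full{"G1 Old Generation"};
  full.add_pool(&eden);
  full.add_pool(&survivor);
  full.add_pool(&old_gen);

  // As wired in this patch, the young-GC manager covers only the two
  // young pools.
  ManagerSketch young{"G1 Young Generation"};
  young.add_pool(&eden);
  young.add_pool(&survivor);

  for (const ManagerSketch* m : {&full, &young}) {
    printf("%s manages %zu pools\n", m->name.c_str(), m->pools.size());
  }
  return 0;
}
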
1849 
1850 void G1CollectedHeap::stop() {
1851   // Stop all concurrent threads. We do this to make sure these threads
1852   // do not continue to execute and access resources (e.g. logging)
1853   // that are destroyed during shutdown.
1854   _cr->stop();
1855   _young_gen_sampling_thread->stop();
1856   _cmThread->stop();
1857   if (G1StringDedup::is_enabled()) {
1858     G1StringDedup::stop();
1859   }


2954 
2955     GCTraceCPUTime tcpu;
2956 
2957     FormatBuffer<> gc_string("Pause ");
2958     if (collector_state()->during_initial_mark_pause()) {
2959       gc_string.append("Initial Mark");
2960     } else if (collector_state()->gcs_are_young()) {
2961       gc_string.append("Young");
2962     } else {
2963       gc_string.append("Mixed");
2964     }
2965     GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
2966 
2967     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
2968                                                                   workers()->active_workers(),
2969                                                                   Threads::number_of_non_daemon_threads());
2970     workers()->update_active_workers(active_workers);
2971     log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
2972 
2973     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
2974     TraceMemoryManagerStats tms(&_memory_manager, gc_cause());
2975 
2976     // If the secondary_free_list is not empty, append it to the
2977     // free_list. No need to wait for the cleanup operation to finish;
2978     // the region allocation code will check the secondary_free_list
2979     // and wait if necessary. If the G1StressConcRegionFreeing flag is
2980     // set, skip this step so that the region allocation code has to
2981     // get entries from the secondary_free_list.
2982     if (!G1StressConcRegionFreeing) {
2983       append_secondary_free_list_if_not_empty_with_lock();
2984     }
2985 
2986     G1HeapTransition heap_transition(this);
2987     size_t heap_used_bytes_before_gc = used();
2988 
2989     // Don't dynamically change the number of GC threads this early.  A value of
2990     // 0 is used to indicate serial work.  When parallel work is done,
2991     // it will be set.
2992 
2993     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
2994       IsGCActiveMark x;


5371 
5372   void do_code_blob(CodeBlob* cb) {
5373     nmethod* nm = (cb != NULL) ? cb->as_nmethod_or_null() : NULL;
5374     if (nm == NULL) {
5375       return;
5376     }
5377 
5378     if (ScavengeRootsInCode) {
5379       _g1h->register_nmethod(nm);
5380     }
5381   }
5382 };
5383 
5384 void G1CollectedHeap::rebuild_strong_code_roots() {
5385   RebuildStrongCodeRootClosure blob_cl(this);
5386   CodeCache::blobs_do(&blob_cl);
5387 }
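
rebuild_strong_code_roots() is an instance of HotSpot's closure idiom: CodeCache::blobs_do() visits every code blob, and the closure filters for nmethods before registering them with the heap. The following self-contained sketch shows the same idiom; every name in it (BlobSketch, blobs_do, RegisterNmethods) is a hypothetical stand-in.

#include <cstdio>
#include <vector>

// Stand-in for CodeBlob/nmethod: as_nmethod_or_null() is non-null only
// for blobs that are nmethods, mirroring the filter above.
struct BlobSketch {
  bool is_nmethod;
  const char* name;
  const BlobSketch* as_nmethod_or_null() const {
    return is_nmethod ? this : nullptr;
  }
};

struct BlobClosureSketch {
  virtual void do_code_blob(const BlobSketch* cb) = 0;
  virtual ~BlobClosureSketch() = default;
};

// Stand-in for CodeCache::blobs_do(): apply the closure to every blob.
static void blobs_do(const std::vector<BlobSketch>& cache,
                     BlobClosureSketch* cl) {
  for (const BlobSketch& cb : cache) {
    cl->do_code_blob(&cb);
  }
}

struct RegisterNmethods : BlobClosureSketch {
  void do_code_blob(const BlobSketch* cb) override {
    const BlobSketch* nm = (cb != nullptr) ? cb->as_nmethod_or_null() : nullptr;
    if (nm == nullptr) {
      return;  // not an nmethod; nothing to register
    }
    printf("register %s\n", nm->name);  // stand-in for register_nmethod()
  }
};

int main() {
  std::vector<BlobSketch> cache = {{true, "foo"}, {false, "stub"}, {true, "bar"}};
  RegisterNmethods cl;
  blobs_do(cache, &cl);
  return 0;
}
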
5388 
5389 GrowableArray<GCMemoryManager*> G1CollectedHeap::memory_managers() {
5390   GrowableArray<GCMemoryManager*> memory_managers(2);
5391   memory_managers.append(&_memory_manager);
5392   memory_managers.append(&_full_gc_memory_manager);
5393   return memory_managers;
5394 }
5395 
5396 GrowableArray<MemoryPool*> G1CollectedHeap::memory_pools() {
5397   GrowableArray<MemoryPool*> memory_pools(3);
5398   memory_pools.append(_eden_pool);
5399   memory_pools.append(_survivor_pool);
5400   memory_pools.append(_old_pool);
5401   return memory_pools;
5402 }
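
These two accessors hand the service layer small by-value arrays, sized up front for exactly the two managers and three pools G1 exposes. Below is a sketch of how a caller might enumerate them, assuming only the append/length/at subset of the GrowableArray API; GrowableArraySketch is a simplified stand-in, not the HotSpot class.

#include <cstdio>
#include <vector>

// Minimal stand-in for HotSpot's GrowableArray: just the append/length/at
// subset the accessors above rely on.
template <typename T>
class GrowableArraySketch {
public:
  explicit GrowableArraySketch(int initial) { _data.reserve(initial); }
  void append(const T& v) { _data.push_back(v); }
  int length() const { return (int)_data.size(); }
  const T& at(int i) const { return _data[i]; }
private:
  std::vector<T> _data;
};

int main() {
  // Mirrors memory_managers(): two managers, young and full.
  GrowableArraySketch<const char*> managers(2);
  managers.append("G1 Young Generation");
  managers.append("G1 Old Generation");

  // A caller (e.g. the management layer) enumerates them by index.
  for (int i = 0; i < managers.length(); i++) {
    printf("manager: %s\n", managers.at(i));
  }
  return 0;
}
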