27 #include "classfile/stringTable.hpp"
28 #include "classfile/symbolTable.hpp"
29 #include "code/codeCache.hpp"
30 #include "code/icBuffer.hpp"
31 #include "gc/g1/bufferingOopClosure.hpp"
32 #include "gc/g1/concurrentMarkThread.inline.hpp"
33 #include "gc/g1/g1Allocator.inline.hpp"
34 #include "gc/g1/g1CollectedHeap.inline.hpp"
35 #include "gc/g1/g1CollectionSet.hpp"
36 #include "gc/g1/g1CollectorPolicy.hpp"
37 #include "gc/g1/g1CollectorState.hpp"
38 #include "gc/g1/g1ConcurrentRefine.hpp"
39 #include "gc/g1/g1ConcurrentRefineThread.hpp"
40 #include "gc/g1/g1EvacStats.inline.hpp"
41 #include "gc/g1/g1FullCollector.hpp"
42 #include "gc/g1/g1GCPhaseTimes.hpp"
43 #include "gc/g1/g1HeapSizingPolicy.hpp"
44 #include "gc/g1/g1HeapTransition.hpp"
45 #include "gc/g1/g1HeapVerifier.hpp"
46 #include "gc/g1/g1HotCardCache.hpp"
47 #include "gc/g1/g1OopClosures.inline.hpp"
48 #include "gc/g1/g1ParScanThreadState.inline.hpp"
49 #include "gc/g1/g1Policy.hpp"
50 #include "gc/g1/g1RegionToSpaceMapper.hpp"
51 #include "gc/g1/g1RemSet.hpp"
52 #include "gc/g1/g1RootClosures.hpp"
53 #include "gc/g1/g1RootProcessor.hpp"
54 #include "gc/g1/g1StringDedup.hpp"
55 #include "gc/g1/g1YCTypes.hpp"
56 #include "gc/g1/g1YoungRemSetSamplingThread.hpp"
57 #include "gc/g1/heapRegion.inline.hpp"
58 #include "gc/g1/heapRegionRemSet.hpp"
59 #include "gc/g1/heapRegionSet.inline.hpp"
60 #include "gc/g1/vm_operations_g1.hpp"
61 #include "gc/shared/gcHeapSummary.hpp"
62 #include "gc/shared/gcId.hpp"
63 #include "gc/shared/gcLocker.inline.hpp"
64 #include "gc/shared/gcTimer.hpp"
65 #include "gc/shared/gcTrace.hpp"
66 #include "gc/shared/gcTraceTime.inline.hpp"
1212 heap_transition->print();
1213 print_heap_after_gc();
1214 print_heap_regions();
1215 #ifdef TRACESPINNING
1216 ParallelTaskTerminator::print_termination_counts();
1217 #endif
1218 }
1219
// Performs a stop-the-world full collection (whole-heap compaction).
// Must be invoked by the VM thread at a safepoint.
// Returns false when the GC locker is active and the collection could not
// be started; returns true once the full collection has completed.
bool G1CollectedHeap::do_full_collection(bool explicit_gc,
                                         bool clear_all_soft_refs) {
  assert_at_safepoint(true /* should_be_vm_thread */);

  // A JNI critical section may be holding the GC locker; in that case the
  // full GC is abandoned and the caller is told it did not run.
  if (GCLocker::check_active_before_gc()) {
    // Full GC was not completed.
    return false;
  }

  // Soft references are cleared either on explicit request from the caller
  // or when the collector policy asks for it.
  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
      collector_policy()->should_clear_all_soft_refs();

  G1FullCollector collector(this, explicit_gc, do_clear_all_soft_refs);
  // Scopes the "Pause Full" log/timing output around the three phases below.
  GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);

  collector.prepare_collection();
  collector.collect();
  collector.complete_collection();

  // Full collection was successfully completed.
  return true;
}
1242
1243 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1244 // Currently, there is no facility in the do_full_collection(bool) API to notify
1245 // the caller that the collection did not succeed (e.g., because it was locked
1246 // out by the GC locker). So, right now, we'll ignore the return value.
1247 bool dummy = do_full_collection(true, /* explicit_gc */
1248 clear_all_soft_refs);
1249 }
1250
1251 void G1CollectedHeap::resize_if_necessary_after_full_collection() {
1252 // Capacity, free and used after the GC counted as full regions to
1509 // below will make sure of that and do any remaining clean up.
1510 _allocator->abandon_gc_alloc_regions();
1511
1512 // Instead of tearing down / rebuilding the free lists here, we
1513 // could instead use the remove_all_pending() method on free_list to
1514 // remove only the ones that we need to remove.
1515 tear_down_region_sets(true /* free_list_only */);
1516 shrink_helper(shrink_bytes);
1517 rebuild_region_sets(true /* free_list_only */);
1518
1519 _hrm.verify_optional();
1520 _verifier->verify_region_sets_optional();
1521 }
1522
1523 // Public methods.
1524
1525 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
1526 CollectedHeap(),
1527 _young_gen_sampling_thread(NULL),
1528 _collector_policy(collector_policy),
1529 _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1530 _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1531 _g1_policy(create_g1_policy(_gc_timer_stw)),
1532 _collection_set(this, _g1_policy),
1533 _dirty_card_queue_set(false),
1534 _is_alive_closure_cm(this),
1535 _is_alive_closure_stw(this),
1536 _ref_processor_cm(NULL),
1537 _ref_processor_stw(NULL),
1538 _bot(NULL),
1539 _hot_card_cache(NULL),
1540 _g1_rem_set(NULL),
1541 _cr(NULL),
1542 _g1mm(NULL),
1543 _preserved_marks_set(true /* in_c_heap */),
1544 _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1545 _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1546 _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1547 _humongous_reclaim_candidates(),
1548 _has_humongous_reclaim_candidates(false),
1813 dummy_region->set_eden();
1814 // Make sure it's full.
1815 dummy_region->set_top(dummy_region->end());
1816 G1AllocRegion::setup(this, dummy_region);
1817
1818 _allocator->init_mutator_alloc_region();
1819
1820 // Do create of the monitoring and management support so that
1821 // values in the heap have been properly initialized.
1822 _g1mm = new G1MonitoringSupport(this);
1823
1824 G1StringDedup::initialize();
1825
1826 _preserved_marks_set.init(ParallelGCThreads);
1827
1828 _collection_set.initialize(max_regions());
1829
1830 return JNI_OK;
1831 }
1832
// Shuts down G1's concurrent service threads during VM shutdown. Must run
// before the resources these threads use are torn down.
void G1CollectedHeap::stop() {
  // Stop all concurrent threads. We do this to make sure these threads
  // do not continue to execute and access resources (e.g. logging)
  // that are destroyed during shutdown.
  _cr->stop();                         // concurrent refinement
  _young_gen_sampling_thread->stop();  // young-gen remembered set sampling
  _cmThread->stop();                   // concurrent marking
  if (G1StringDedup::is_enabled()) {
    G1StringDedup::stop();
  }
}
1844
// Called when a safepoint begins: brings the threads registered with the
// SuspendibleThreadSet (G1's concurrent GC threads) to a yield point.
void G1CollectedHeap::safepoint_synchronize_begin() {
  SuspendibleThreadSet::synchronize();
}
1848
// Called when a safepoint ends: allows the threads in the
// SuspendibleThreadSet to resume.
void G1CollectedHeap::safepoint_synchronize_end() {
  SuspendibleThreadSet::desynchronize();
}
1852
// Upper bound on the alignment the heap may need: no region can exceed
// HeapRegion::max_region_size(), so that value is a safe alignment.
size_t G1CollectedHeap::conservative_max_heap_alignment() {
  return HeapRegion::max_region_size();
}
1856
// Post-initialization hook run after the heap itself is set up; installs
// the two reference processors (concurrent marking and STW).
// NOTE(review): CollectedHeap::post_initialize() is not invoked here —
// confirm the base class has no required work at this point.
void G1CollectedHeap::post_initialize() {
  ref_processing_init();
}
1860
1861 void G1CollectedHeap::ref_processing_init() {
1862 // Reference processing in G1 currently works as follows:
1863 //
1864 // * There are two reference processor instances. One is
1865 // used to record and process discovered references
1866 // during concurrent marking; the other is used to
1867 // record and process references during STW pauses
1868 // (both full and incremental).
1869 // * Both ref processors need to 'span' the entire heap as
1870 // the regions in the collection set may be dotted around.
1871 //
1872 // * For the concurrent marking ref processor:
1873 // * Reference discovery is enabled at initial marking.
1874 // * Reference discovery is disabled and the discovered
1875 // references processed etc during remarking.
1876 // * Reference discovery is MT (see below).
1877 // * Reference discovery requires a barrier (see below).
2937
2938 GCTraceCPUTime tcpu;
2939
2940 FormatBuffer<> gc_string("Pause ");
2941 if (collector_state()->during_initial_mark_pause()) {
2942 gc_string.append("Initial Mark");
2943 } else if (collector_state()->gcs_are_young()) {
2944 gc_string.append("Young");
2945 } else {
2946 gc_string.append("Mixed");
2947 }
2948 GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
2949
2950 uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
2951 workers()->active_workers(),
2952 Threads::number_of_non_daemon_threads());
2953 workers()->update_active_workers(active_workers);
2954 log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
2955
2956 TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
2957 TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
2958
2959 // If the secondary_free_list is not empty, append it to the
2960 // free_list. No need to wait for the cleanup operation to finish;
2961 // the region allocation code will check the secondary_free_list
2962 // and wait if necessary. If the G1StressConcRegionFreeing flag is
2963 // set, skip this step so that the region allocation code has to
2964 // get entries from the secondary_free_list.
2965 if (!G1StressConcRegionFreeing) {
2966 append_secondary_free_list_if_not_empty_with_lock();
2967 }
2968
2969 G1HeapTransition heap_transition(this);
2970 size_t heap_used_bytes_before_gc = used();
2971
2972 // Don't dynamically change the number of GC threads this early. A value of
2973 // 0 is used to indicate serial work. When parallel work is done,
2974 // it will be set.
2975
2976 { // Call to jvmpi::post_class_unload_events must occur outside of active GC
2977 IsGCActiveMark x;
5350
5351 public:
5352 RebuildStrongCodeRootClosure(G1CollectedHeap* g1h) :
5353 _g1h(g1h) {}
5354
5355 void do_code_blob(CodeBlob* cb) {
5356 nmethod* nm = (cb != NULL) ? cb->as_nmethod_or_null() : NULL;
5357 if (nm == NULL) {
5358 return;
5359 }
5360
5361 if (ScavengeRootsInCode) {
5362 _g1h->register_nmethod(nm);
5363 }
5364 }
5365 };
5366
5367 void G1CollectedHeap::rebuild_strong_code_roots() {
5368 RebuildStrongCodeRootClosure blob_cl(this);
5369 CodeCache::blobs_do(&blob_cl);
5370 }
|
27 #include "classfile/stringTable.hpp"
28 #include "classfile/symbolTable.hpp"
29 #include "code/codeCache.hpp"
30 #include "code/icBuffer.hpp"
31 #include "gc/g1/bufferingOopClosure.hpp"
32 #include "gc/g1/concurrentMarkThread.inline.hpp"
33 #include "gc/g1/g1Allocator.inline.hpp"
34 #include "gc/g1/g1CollectedHeap.inline.hpp"
35 #include "gc/g1/g1CollectionSet.hpp"
36 #include "gc/g1/g1CollectorPolicy.hpp"
37 #include "gc/g1/g1CollectorState.hpp"
38 #include "gc/g1/g1ConcurrentRefine.hpp"
39 #include "gc/g1/g1ConcurrentRefineThread.hpp"
40 #include "gc/g1/g1EvacStats.inline.hpp"
41 #include "gc/g1/g1FullCollector.hpp"
42 #include "gc/g1/g1GCPhaseTimes.hpp"
43 #include "gc/g1/g1HeapSizingPolicy.hpp"
44 #include "gc/g1/g1HeapTransition.hpp"
45 #include "gc/g1/g1HeapVerifier.hpp"
46 #include "gc/g1/g1HotCardCache.hpp"
47 #include "gc/g1/g1MemoryPool.hpp"
48 #include "gc/g1/g1OopClosures.inline.hpp"
49 #include "gc/g1/g1ParScanThreadState.inline.hpp"
50 #include "gc/g1/g1Policy.hpp"
51 #include "gc/g1/g1RegionToSpaceMapper.hpp"
52 #include "gc/g1/g1RemSet.hpp"
53 #include "gc/g1/g1RootClosures.hpp"
54 #include "gc/g1/g1RootProcessor.hpp"
55 #include "gc/g1/g1StringDedup.hpp"
56 #include "gc/g1/g1YCTypes.hpp"
57 #include "gc/g1/g1YoungRemSetSamplingThread.hpp"
58 #include "gc/g1/heapRegion.inline.hpp"
59 #include "gc/g1/heapRegionRemSet.hpp"
60 #include "gc/g1/heapRegionSet.inline.hpp"
61 #include "gc/g1/vm_operations_g1.hpp"
62 #include "gc/shared/gcHeapSummary.hpp"
63 #include "gc/shared/gcId.hpp"
64 #include "gc/shared/gcLocker.inline.hpp"
65 #include "gc/shared/gcTimer.hpp"
66 #include "gc/shared/gcTrace.hpp"
67 #include "gc/shared/gcTraceTime.inline.hpp"
1213 heap_transition->print();
1214 print_heap_after_gc();
1215 print_heap_regions();
1216 #ifdef TRACESPINNING
1217 ParallelTaskTerminator::print_termination_counts();
1218 #endif
1219 }
1220
// Performs a stop-the-world full collection (whole-heap compaction).
// Must be invoked by the VM thread at a safepoint.
// Returns false when the GC locker is active and the collection could not
// be started; returns true once the full collection has completed.
bool G1CollectedHeap::do_full_collection(bool explicit_gc,
                                         bool clear_all_soft_refs) {
  assert_at_safepoint(true /* should_be_vm_thread */);

  // A JNI critical section may be holding the GC locker; in that case the
  // full GC is abandoned and the caller is told it did not run.
  if (GCLocker::check_active_before_gc()) {
    // Full GC was not completed.
    return false;
  }

  // Soft references are cleared either on explicit request from the caller
  // or when the collector policy asks for it.
  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
      collector_policy()->should_clear_all_soft_refs();

  // The full-GC memory manager is handed to the collector — presumably so
  // serviceability accounting/notifications are attributed to the full-GC
  // manager; confirm against G1FullCollector.
  G1FullCollector collector(this, &_full_gc_memory_manager, explicit_gc, do_clear_all_soft_refs);
  // Scopes the "Pause Full" log/timing output around the three phases below.
  GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);

  collector.prepare_collection();
  collector.collect();
  collector.complete_collection();

  // Full collection was successfully completed.
  return true;
}
1243
1244 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1245 // Currently, there is no facility in the do_full_collection(bool) API to notify
1246 // the caller that the collection did not succeed (e.g., because it was locked
1247 // out by the GC locker). So, right now, we'll ignore the return value.
1248 bool dummy = do_full_collection(true, /* explicit_gc */
1249 clear_all_soft_refs);
1250 }
1251
1252 void G1CollectedHeap::resize_if_necessary_after_full_collection() {
1253 // Capacity, free and used after the GC counted as full regions to
1510 // below will make sure of that and do any remaining clean up.
1511 _allocator->abandon_gc_alloc_regions();
1512
1513 // Instead of tearing down / rebuilding the free lists here, we
1514 // could instead use the remove_all_pending() method on free_list to
1515 // remove only the ones that we need to remove.
1516 tear_down_region_sets(true /* free_list_only */);
1517 shrink_helper(shrink_bytes);
1518 rebuild_region_sets(true /* free_list_only */);
1519
1520 _hrm.verify_optional();
1521 _verifier->verify_region_sets_optional();
1522 }
1523
1524 // Public methods.
1525
1526 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
1527 CollectedHeap(),
1528 _young_gen_sampling_thread(NULL),
1529 _collector_policy(collector_policy),
1530 _memory_manager("G1 Young Generation", "end of minor GC"),
1531 _full_gc_memory_manager("G1 Old Generation", "end of major GC"),
1532 _eden_pool(NULL),
1533 _survivor_pool(NULL),
1534 _old_pool(NULL),
1535 _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1536 _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1537 _g1_policy(create_g1_policy(_gc_timer_stw)),
1538 _collection_set(this, _g1_policy),
1539 _dirty_card_queue_set(false),
1540 _is_alive_closure_cm(this),
1541 _is_alive_closure_stw(this),
1542 _ref_processor_cm(NULL),
1543 _ref_processor_stw(NULL),
1544 _bot(NULL),
1545 _hot_card_cache(NULL),
1546 _g1_rem_set(NULL),
1547 _cr(NULL),
1548 _g1mm(NULL),
1549 _preserved_marks_set(true /* in_c_heap */),
1550 _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1551 _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1552 _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1553 _humongous_reclaim_candidates(),
1554 _has_humongous_reclaim_candidates(false),
1819 dummy_region->set_eden();
1820 // Make sure it's full.
1821 dummy_region->set_top(dummy_region->end());
1822 G1AllocRegion::setup(this, dummy_region);
1823
1824 _allocator->init_mutator_alloc_region();
1825
1826 // Do create of the monitoring and management support so that
1827 // values in the heap have been properly initialized.
1828 _g1mm = new G1MonitoringSupport(this);
1829
1830 G1StringDedup::initialize();
1831
1832 _preserved_marks_set.init(ParallelGCThreads);
1833
1834 _collection_set.initialize(max_regions());
1835
1836 return JNI_OK;
1837 }
1838
// Creates the memory pools exposed via the management/serviceability API
// and attaches them to the two GC memory managers.
void G1CollectedHeap::initialize_serviceability() {
  _eden_pool = new G1EdenPool(this);
  _survivor_pool = new G1SurvivorPool(this);
  _old_pool = new G1OldGenPool(this);

  // The full ("old generation") manager covers all three pools.
  _full_gc_memory_manager.add_pool(_eden_pool);
  _full_gc_memory_manager.add_pool(_survivor_pool);
  _full_gc_memory_manager.add_pool(_old_pool);

  // The young ("minor GC") manager covers only eden and survivor.
  // NOTE(review): young collections can also promote into old regions —
  // confirm the old pool is deliberately excluded from this manager.
  _memory_manager.add_pool(_eden_pool);
  _memory_manager.add_pool(_survivor_pool);

}
1852
// Shuts down G1's concurrent service threads during VM shutdown. Must run
// before the resources these threads use are torn down.
void G1CollectedHeap::stop() {
  // Stop all concurrent threads. We do this to make sure these threads
  // do not continue to execute and access resources (e.g. logging)
  // that are destroyed during shutdown.
  _cr->stop();                         // concurrent refinement
  _young_gen_sampling_thread->stop();  // young-gen remembered set sampling
  _cmThread->stop();                   // concurrent marking
  if (G1StringDedup::is_enabled()) {
    G1StringDedup::stop();
  }
}
1864
// Called when a safepoint begins: brings the threads registered with the
// SuspendibleThreadSet (G1's concurrent GC threads) to a yield point.
void G1CollectedHeap::safepoint_synchronize_begin() {
  SuspendibleThreadSet::synchronize();
}
1868
// Called when a safepoint ends: allows the threads in the
// SuspendibleThreadSet to resume.
void G1CollectedHeap::safepoint_synchronize_end() {
  SuspendibleThreadSet::desynchronize();
}
1872
// Upper bound on the alignment the heap may need: no region can exceed
// HeapRegion::max_region_size(), so that value is a safe alignment.
size_t G1CollectedHeap::conservative_max_heap_alignment() {
  return HeapRegion::max_region_size();
}
1876
// Post-initialization hook: run the base class's post-initialization
// first, then install G1's two reference processors.
void G1CollectedHeap::post_initialize() {
  CollectedHeap::post_initialize();
  ref_processing_init();
}
1881
1882 void G1CollectedHeap::ref_processing_init() {
1883 // Reference processing in G1 currently works as follows:
1884 //
1885 // * There are two reference processor instances. One is
1886 // used to record and process discovered references
1887 // during concurrent marking; the other is used to
1888 // record and process references during STW pauses
1889 // (both full and incremental).
1890 // * Both ref processors need to 'span' the entire heap as
1891 // the regions in the collection set may be dotted around.
1892 //
1893 // * For the concurrent marking ref processor:
1894 // * Reference discovery is enabled at initial marking.
1895 // * Reference discovery is disabled and the discovered
1896 // references processed etc during remarking.
1897 // * Reference discovery is MT (see below).
1898 // * Reference discovery requires a barrier (see below).
2958
2959 GCTraceCPUTime tcpu;
2960
2961 FormatBuffer<> gc_string("Pause ");
2962 if (collector_state()->during_initial_mark_pause()) {
2963 gc_string.append("Initial Mark");
2964 } else if (collector_state()->gcs_are_young()) {
2965 gc_string.append("Young");
2966 } else {
2967 gc_string.append("Mixed");
2968 }
2969 GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
2970
2971 uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
2972 workers()->active_workers(),
2973 Threads::number_of_non_daemon_threads());
2974 workers()->update_active_workers(active_workers);
2975 log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
2976
2977 TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
2978 TraceMemoryManagerStats tms(&_memory_manager, gc_cause());
2979
2980 // If the secondary_free_list is not empty, append it to the
2981 // free_list. No need to wait for the cleanup operation to finish;
2982 // the region allocation code will check the secondary_free_list
2983 // and wait if necessary. If the G1StressConcRegionFreeing flag is
2984 // set, skip this step so that the region allocation code has to
2985 // get entries from the secondary_free_list.
2986 if (!G1StressConcRegionFreeing) {
2987 append_secondary_free_list_if_not_empty_with_lock();
2988 }
2989
2990 G1HeapTransition heap_transition(this);
2991 size_t heap_used_bytes_before_gc = used();
2992
2993 // Don't dynamically change the number of GC threads this early. A value of
2994 // 0 is used to indicate serial work. When parallel work is done,
2995 // it will be set.
2996
2997 { // Call to jvmpi::post_class_unload_events must occur outside of active GC
2998 IsGCActiveMark x;
5371
5372 public:
5373 RebuildStrongCodeRootClosure(G1CollectedHeap* g1h) :
5374 _g1h(g1h) {}
5375
5376 void do_code_blob(CodeBlob* cb) {
5377 nmethod* nm = (cb != NULL) ? cb->as_nmethod_or_null() : NULL;
5378 if (nm == NULL) {
5379 return;
5380 }
5381
5382 if (ScavengeRootsInCode) {
5383 _g1h->register_nmethod(nm);
5384 }
5385 }
5386 };
5387
5388 void G1CollectedHeap::rebuild_strong_code_roots() {
5389 RebuildStrongCodeRootClosure blob_cl(this);
5390 CodeCache::blobs_do(&blob_cl);
5391 }
5392
5393 GrowableArray<GCMemoryManager*> G1CollectedHeap::memory_managers() {
5394 GrowableArray<GCMemoryManager*> memory_managers(2);
5395 memory_managers.append(&_memory_manager);
5396 memory_managers.append(&_full_gc_memory_manager);
5397 return memory_managers;
5398 }
5399
5400 GrowableArray<MemoryPool*> G1CollectedHeap::memory_pools() {
5401 GrowableArray<MemoryPool*> memory_pools(3);
5402 memory_pools.append(_eden_pool);
5403 memory_pools.append(_survivor_pool);
5404 memory_pools.append(_old_pool);
5405 return memory_pools;
5406 }
|