src/hotspot/share/gc/g1/g1CollectedHeap.cpp

1151   // the compaction events.
1152   print_hrm_post_compaction();
1153   heap_transition->print();
1154   print_heap_after_gc();
1155   print_heap_regions();
1156 #ifdef TRACESPINNING
1157   ParallelTaskTerminator::print_termination_counts();
1158 #endif
1159 }
1160 
1161 bool G1CollectedHeap::do_full_collection(bool explicit_gc,
1162                                          bool clear_all_soft_refs) {
1163   assert_at_safepoint(true /* should_be_vm_thread */);
1164 
1165   if (GCLocker::check_active_before_gc()) {
1166     // Full GC was not completed.
1167     return false;
1168   }
1169 
1170   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1171       collector_policy()->should_clear_all_soft_refs();
1172 
1173   G1FullCollector collector(this, &_full_gc_memory_manager, explicit_gc, do_clear_all_soft_refs);
1174   GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
1175 
1176   collector.prepare_collection();
1177   collector.collect();
1178   collector.complete_collection();
1179 
1180   // Full collection was successfully completed.
1181   return true;
1182 }
1183 
1184 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1185   // Currently, there is no facility in the do_full_collection(bool) API to notify
1186   // the caller that the collection did not succeed (e.g., because it was locked
1187   // out by the GC locker). So, right now, we'll ignore the return value.
1188   bool dummy = do_full_collection(true,                /* explicit_gc */
1189                                   clear_all_soft_refs);
1190 }
1191 


1326                                             true, /* clear_all_soft_refs */
1327                                             true, /* expect_null_mutator_alloc_region */
1328                                             succeeded);
1329 
1330   if (result != NULL || !*succeeded) {
1331     return result;
1332   }
1333 
1334   // Attempts to allocate, no GC
1335   result = satisfy_failed_allocation_helper(word_size,
1336                                             context,
1337                                             false, /* do_gc */
1338                                             false, /* clear_all_soft_refs */
1339                                             true,  /* expect_null_mutator_alloc_region */
1340                                             succeeded);
1341 
1342   if (result != NULL) {
1343     return result;
1344   }
1345 
1346   assert(!collector_policy()->should_clear_all_soft_refs(),
1347          "Flag should have been handled and cleared prior to this point");
1348 
1349   // What else?  We might try synchronous finalization later.  If the total
1350   // space available is large enough for the allocation, then a more
1351   // complete compaction phase than we've tried so far might be
1352   // appropriate.
1353   return NULL;
1354 }
1355 
1356 // Attempt to expand the heap sufficiently
1357 // to support an allocation of the given "word_size".  If
1358 // successful, perform the allocation and return the address of the
1359 // allocated block, or else return "NULL".
1360 
1361 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) {
1362   assert_at_safepoint(true /* should_be_vm_thread */);
1363 
1364   _verifier->verify_region_sets_optional();
1365 
1366   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);


1446   // below will make sure of that and do any remaining cleanup.
1447   _allocator->abandon_gc_alloc_regions();
1448 
1449   // Instead of tearing down / rebuilding the free lists here, we
1450   // could instead use the remove_all_pending() method on free_list to
1451   // remove only the ones that we need to remove.
1452   tear_down_region_sets(true /* free_list_only */);
1453   shrink_helper(shrink_bytes);
1454   rebuild_region_sets(true /* free_list_only */);
1455 
1456   _hrm.verify_optional();
1457   _verifier->verify_region_sets_optional();
1458 }
1459 
1460 // Public methods.
1461 
1462 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
1463   CollectedHeap(),
1464   _young_gen_sampling_thread(NULL),
1465   _collector_policy(collector_policy),

1466   _memory_manager("G1 Young Generation", "end of minor GC"),
1467   _full_gc_memory_manager("G1 Old Generation", "end of major GC"),
1468   _eden_pool(NULL),
1469   _survivor_pool(NULL),
1470   _old_pool(NULL),
1471   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1472   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1473   _g1_policy(create_g1_policy(_gc_timer_stw)),
1474   _collection_set(this, _g1_policy),
1475   _dirty_card_queue_set(false),
1476   _is_alive_closure_cm(this),
1477   _is_alive_closure_stw(this),
1478   _ref_processor_cm(NULL),
1479   _ref_processor_stw(NULL),
1480   _bot(NULL),
1481   _hot_card_cache(NULL),
1482   _g1_rem_set(NULL),
1483   _cr(NULL),
1484   _g1mm(NULL),
1485   _preserved_marks_set(true /* in_c_heap */),


1874   // STW ref processor
1875   _ref_processor_stw =
1876     new ReferenceProcessor(mr,    // span
1877                            mt_processing,
1878                                 // mt processing
1879                            ParallelGCThreads,
1880                                 // degree of mt processing
1881                            (ParallelGCThreads > 1),
1882                                 // mt discovery
1883                            ParallelGCThreads,
1884                                 // degree of mt discovery
1885                            true,
1886                                 // Reference discovery is atomic
1887                            &_is_alive_closure_stw);
1888                                 // is alive closure
1889                                 // (for efficiency/performance)
1890 }
1891 
1892 CollectorPolicy* G1CollectedHeap::collector_policy() const {
1893   return _collector_policy;




1894 }
1895 
1896 size_t G1CollectedHeap::capacity() const {
1897   return _hrm.length() * HeapRegion::GrainBytes;
1898 }
1899 
1900 size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
1901   return _hrm.total_free_bytes();
1902 }
1903 
1904 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
1905   hr->reset_gc_time_stamp();
1906 }
1907 
1908 #ifndef PRODUCT
1909 
1910 class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
1911 private:
1912   unsigned _gc_time_stamp;
1913   bool _failures;




1151   // the compaction events.
1152   print_hrm_post_compaction();
1153   heap_transition->print();
1154   print_heap_after_gc();
1155   print_heap_regions();
1156 #ifdef TRACESPINNING
1157   ParallelTaskTerminator::print_termination_counts();
1158 #endif
1159 }
1160 
1161 bool G1CollectedHeap::do_full_collection(bool explicit_gc,
1162                                          bool clear_all_soft_refs) {
1163   assert_at_safepoint(true /* should_be_vm_thread */);
1164 
1165   if (GCLocker::check_active_before_gc()) {
1166     // Full GC was not completed.
1167     return false;
1168   }
1169 
1170   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1171       soft_ref_policy()->should_clear_all_soft_refs();
1172 
1173   G1FullCollector collector(this, &_full_gc_memory_manager, explicit_gc, do_clear_all_soft_refs);
1174   GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
1175 
1176   collector.prepare_collection();
1177   collector.collect();
1178   collector.complete_collection();
1179 
1180   // Full collection was successfully completed.
1181   return true;
1182 }
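
A note on the guard above: GCLocker::check_active_before_gc() makes the
full collection refusable, so callers must be prepared for a false return
while a JNI critical section is in progress. A minimal, self-contained
sketch of that pattern (toy names only, not the HotSpot classes):

#include <atomic>

// Counts JNI-critical sections; a full GC must not start while any are
// active. Hypothetical stand-in for GCLocker.
struct ToyGCLocker {
  static std::atomic<int> _critical_count;
  static bool is_active() { return _critical_count.load() > 0; }
};
std::atomic<int> ToyGCLocker::_critical_count{0};

bool toy_do_full_collection(bool clear_all_soft_refs) {
  if (ToyGCLocker::is_active()) {
    return false;              // full GC was not completed; caller may retry
  }
  (void)clear_all_soft_refs;   // would be forwarded to the collector
  // ... prepare_collection() / collect() / complete_collection() ...
  return true;                 // collection ran to completion
}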
1183 
1184 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1185   // Currently, there is no facility in the do_full_collection(bool) API to notify
1186   // the caller that the collection did not succeed (e.g., because it was locked
1187   // out by the GC locker). So, right now, we'll ignore the return value.
1188   bool dummy = do_full_collection(true,                /* explicit_gc */
1189                                   clear_all_soft_refs);
1190 }
1191 


1326                                             true, /* clear_all_soft_refs */
1327                                             true, /* expect_null_mutator_alloc_region */
1328                                             succeeded);
1329 
1330   if (result != NULL || !*succeeded) {
1331     return result;
1332   }
1333 
1334   // Attempts to allocate, no GC
1335   result = satisfy_failed_allocation_helper(word_size,
1336                                             context,
1337                                             false, /* do_gc */
1338                                             false, /* clear_all_soft_refs */
1339                                             true,  /* expect_null_mutator_alloc_region */
1340                                             succeeded);
1341 
1342   if (result != NULL) {
1343     return result;
1344   }
1345 
1346   assert(!soft_ref_policy()->should_clear_all_soft_refs(),
1347          "Flag should have been handled and cleared prior to this point");
1348 
1349   // What else?  We might try synchronous finalization later.  If the total
1350   // space available is large enough for the allocation, then a more
1351   // complete compaction phase than we've tried so far might be
1352   // appropriate.
1353   return NULL;
1354 }
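
The two helper calls above are the tail of an escalation ladder: collect
with soft references cleared, retry the allocation, then make one last
attempt with no further GC before reporting failure. A compilable toy
sketch of that shape (the helper names here are hypothetical):

#include <cstddef>
#include <cstdlib>

void  run_full_gc(bool clear_all_soft_refs) { (void)clear_all_soft_refs; }
void* try_allocate(std::size_t bytes) { return std::malloc(bytes); }
void* try_expand_and_allocate(std::size_t bytes) { return std::malloc(bytes); }

void* toy_satisfy_failed_allocation(std::size_t bytes) {
  run_full_gc(true /* clear_all_soft_refs */);
  if (void* p = try_allocate(bytes)) return p;             // GC freed space
  if (void* p = try_expand_and_allocate(bytes)) return p;  // grow, no extra GC
  return nullptr;   // the caller is now expected to throw OutOfMemoryError
}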
1355 
1356 // Attempt to expand the heap sufficiently
1357 // to support an allocation of the given "word_size".  If
1358 // successful, perform the allocation and return the address of the
1359 // allocated block, or else return "NULL".
1360 
1361 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) {
1362   assert_at_safepoint(true /* should_be_vm_thread */);
1363 
1364   _verifier->verify_region_sets_optional();
1365 
1366   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
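
The MAX2 above asks for at least the failed request, but never less than
MinHeapDeltaBytes, so small requests still grow the heap by a useful chunk.
A worked example with assumed values (8-byte HeapWords on 64-bit; 128 KiB
is an illustrative MinHeapDeltaBytes, not the actual default):

#include <algorithm>
#include <cstddef>

int main() {
  const std::size_t HeapWordSize      = 8;           // assumed 64-bit HeapWord
  const std::size_t MinHeapDeltaBytes = 128 * 1024;  // illustrative value only
  const std::size_t word_size         = 3000;        // failed 24000-byte request
  std::size_t expand_bytes = std::max(word_size * HeapWordSize, MinHeapDeltaBytes);
  // 24000 < 131072, so the MinHeapDeltaBytes floor wins: expand by 128 KiB.
  return expand_bytes == MinHeapDeltaBytes ? 0 : 1;
}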


1446   // below will make sure of that and do any remaining cleanup.
1447   _allocator->abandon_gc_alloc_regions();
1448 
1449   // Instead of tearing down / rebuilding the free lists here, we
1450   // could instead use the remove_all_pending() method on free_list to
1451   // remove only the ones that we need to remove.
1452   tear_down_region_sets(true /* free_list_only */);
1453   shrink_helper(shrink_bytes);
1454   rebuild_region_sets(true /* free_list_only */);
1455 
1456   _hrm.verify_optional();
1457   _verifier->verify_region_sets_optional();
1458 }
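
The tear-down / shrink / rebuild sequence above trades precision for
simplicity: rather than pruning individual free-list entries (the
remove_all_pending() alternative mentioned in the comment), it forgets the
region sets wholesale and reconstructs them after shrinking. A toy sketch
of that shape, with hypothetical names:

#include <cstddef>

struct ToyHeap {
  std::size_t committed_bytes = 64 * 1024 * 1024;
  void tear_down_free_lists() { /* forget cached free regions */ }
  void shrink(std::size_t bytes) { committed_bytes -= bytes; }
  void rebuild_free_lists()   { /* rescan committed regions */ }
};

void toy_shrink(ToyHeap& heap, std::size_t shrink_bytes) {
  heap.tear_down_free_lists();   // simpler than pruning entries one by one
  heap.shrink(shrink_bytes);
  heap.rebuild_free_lists();
}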
1459 
1460 // Public methods.
1461 
1462 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
1463   CollectedHeap(),
1464   _young_gen_sampling_thread(NULL),
1465   _collector_policy(collector_policy),
1466   _soft_ref_policy(),
1467   _memory_manager("G1 Young Generation", "end of minor GC"),
1468   _full_gc_memory_manager("G1 Old Generation", "end of major GC"),
1469   _eden_pool(NULL),
1470   _survivor_pool(NULL),
1471   _old_pool(NULL),
1472   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1473   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1474   _g1_policy(create_g1_policy(_gc_timer_stw)),
1475   _collection_set(this, _g1_policy),
1476   _dirty_card_queue_set(false),
1477   _is_alive_closure_cm(this),
1478   _is_alive_closure_stw(this),
1479   _ref_processor_cm(NULL),
1480   _ref_processor_stw(NULL),
1481   _bot(NULL),
1482   _hot_card_cache(NULL),
1483   _g1_rem_set(NULL),
1484   _cr(NULL),
1485   _g1mm(NULL),
1486   _preserved_marks_set(true /* in_c_heap */),


1875   // STW ref processor
1876   _ref_processor_stw =
1877     new ReferenceProcessor(mr,    // span
1878                            mt_processing,
1879                                 // mt processing
1880                            ParallelGCThreads,
1881                                 // degree of mt processing
1882                            (ParallelGCThreads > 1),
1883                                 // mt discovery
1884                            ParallelGCThreads,
1885                                 // degree of mt discovery
1886                            true,
1887                                 // Reference discovery is atomic
1888                            &_is_alive_closure_stw);
1889                                 // is alive closure
1890                                 // (for efficiency/performance)
1891 }
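
Most of the constructor arguments above are MT knobs: whether reference
processing and reference discovery run multi-threaded, and at what degree.
The mt_processing flag is computed outside this hunk, so the derivation
below is an assumption for illustration; MT discovery visibly uses
(ParallelGCThreads > 1):

#include <cstdio>

int main() {
  const unsigned ParallelGCThreads = 4;
  const bool mt_processing = ParallelGCThreads > 1;  // assumed derivation
  const bool mt_discovery  = ParallelGCThreads > 1;  // as in the hunk above
  std::printf("processing: %s, degree %u; discovery: %s, degree %u\n",
              mt_processing ? "MT" : "serial", ParallelGCThreads,
              mt_discovery  ? "MT" : "serial", ParallelGCThreads);
  return 0;
}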
1892 
1893 CollectorPolicy* G1CollectedHeap::collector_policy() const {
1894   return _collector_policy;
1895 }
1896 
1897 SoftRefPolicy* G1CollectedHeap::soft_ref_policy() {
1898   return &_soft_ref_policy;
1899 }
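
The new accessor above is the point of this change: should_clear_all_soft_refs
now lives on a SoftRefPolicy object instead of the CollectorPolicy. A toy
model of the handshake (only should_clear_all_soft_refs() appears in the
diff; the setter and the reset are assumptions for illustration):

class ToySoftRefPolicy {
  bool _should_clear_all_soft_refs = false;
public:
  bool should_clear_all_soft_refs() const { return _should_clear_all_soft_refs; }
  void set_should_clear_all_soft_refs(bool v) { _should_clear_all_soft_refs = v; }
};

// Mirrors do_full_collection() above: either the caller or the policy may
// force soft references to be treated as dead for this one collection.
bool toy_collect(ToySoftRefPolicy& policy, bool caller_wants_clear) {
  const bool do_clear = caller_wants_clear || policy.should_clear_all_soft_refs();
  // ... collect, clearing soft references if do_clear ...
  policy.set_should_clear_all_soft_refs(false);   // one-shot request: reset
  return do_clear;
}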
1900 
1901 size_t G1CollectedHeap::capacity() const {
1902   return _hrm.length() * HeapRegion::GrainBytes;
1903 }
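
capacity() is just committed-region count times the fixed region size. A
worked example, assuming 1 MiB regions (the real GrainBytes is chosen
ergonomically from the maximum heap size):

#include <cstddef>
#include <cstdio>

int main() {
  const std::size_t GrainBytes = 1024 * 1024;  // assumed 1 MiB G1 regions
  const std::size_t committed  = 2048;         // stand-in for _hrm.length()
  std::printf("capacity = %zu MiB\n", (committed * GrainBytes) >> 20);  // 2048
  return 0;
}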
1904 
1905 size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
1906   return _hrm.total_free_bytes();
1907 }
1908 
1909 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
1910   hr->reset_gc_time_stamp();
1911 }
1912 
1913 #ifndef PRODUCT
1914 
1915 class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
1916 private:
1917   unsigned _gc_time_stamp;
1918   bool _failures;

