src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

1268 
1269   SvcGCMarker sgcm(SvcGCMarker::FULL);
1270   ResourceMark rm;
1271 
1272   print_heap_before_gc();
1273   trace_heap_before_gc(gc_tracer);
1274 
1275   size_t metadata_prev_used = MetaspaceAux::used_bytes();
1276 
1277   verify_region_sets_optional();
1278 
1279   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1280                            collector_policy()->should_clear_all_soft_refs();
1281 
1282   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
1283 
1284   {
1285     IsGCActiveMark x;
1286 
1287     // Timing
1288     assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant");
1289     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
1290 
1291     {
1292       GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL, gc_tracer->gc_id());
1293       TraceCollectorStats tcs(g1mm()->full_collection_counters());
1294       TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1295 
1296       double start = os::elapsedTime();
1297       g1_policy()->record_full_collection_start();
1298 
1299       // Note: When we have a more flexible GC logging framework that
1300       // allows us to add optional attributes to a GC log record we
1301       // could consider timing and reporting how long we wait in the
1302       // following two methods.
1303       wait_while_free_regions_coming();
1304       // If we start the compaction before the CM threads finish
1305       // scanning the root regions we might trip them over as we'll
1306       // be moving objects / updating references. So let's wait until
1307       // they are done. By telling them to abort, they should complete
1308       // early.


2312     }
2313     return false;
2314   }
2315   size_t result() { return _used; }
2316 };
2317 
2318 size_t G1CollectedHeap::recalculate_used() const {
2319   double recalculate_used_start = os::elapsedTime();
2320 
2321   SumUsedClosure blk;
2322   heap_region_iterate(&blk);
2323 
2324   g1_policy()->phase_times()->record_evac_fail_recalc_used_time((os::elapsedTime() - recalculate_used_start) * 1000.0);
2325   return blk.result();
2326 }
2327 
2328 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
2329   switch (cause) {
2330     case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
2331     case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;

2332     case GCCause::_g1_humongous_allocation: return true;
2333     case GCCause::_update_allocation_context_stats_inc: return true;
2334     default:                                return false;
2335   }
2336 }
2337 
2338 #ifndef PRODUCT
2339 void G1CollectedHeap::allocate_dummy_regions() {
2340   // Let's fill up most of the region
2341   size_t word_size = HeapRegion::GrainWords - 1024;
2342   // And as a result the region we'll allocate will be humongous.
2343   guarantee(isHumongous(word_size), "sanity");
2344 
2345   for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
2346     // Let's use the existing mechanism for the allocation
2347     HeapWord* dummy_obj = humongous_obj_allocate(word_size,
2348                                                  AllocationContext::system());
2349     if (dummy_obj != NULL) {
2350       MemRegion mr(dummy_obj, word_size);
2351       CollectedHeap::fill_with_object(mr);




1268 
1269   SvcGCMarker sgcm(SvcGCMarker::FULL);
1270   ResourceMark rm;
1271 
1272   print_heap_before_gc();
1273   trace_heap_before_gc(gc_tracer);
1274 
1275   size_t metadata_prev_used = MetaspaceAux::used_bytes();
1276 
1277   verify_region_sets_optional();
1278 
1279   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1280                            collector_policy()->should_clear_all_soft_refs();
1281 
1282   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
1283 
1284   {
1285     IsGCActiveMark x;
1286 
1287     // Timing
1288     assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
1289     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
1290 
1291     {
1292       GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL, gc_tracer->gc_id());
1293       TraceCollectorStats tcs(g1mm()->full_collection_counters());
1294       TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1295 
1296       double start = os::elapsedTime();
1297       g1_policy()->record_full_collection_start();
1298 
1299       // Note: When we have a more flexible GC logging framework that
1300       // allows us to add optional attributes to a GC log record we
1301       // could consider timing and reporting how long we wait in the
1302       // following two methods.
1303       wait_while_free_regions_coming();
1304       // If we start the compaction before the CM threads finish
1305       // scanning the root regions we might trip them over as we'll
1306       // be moving objects / updating references. So let's wait until
1307       // they are done. By telling them to abort, they should complete
1308       // early.


2312     }
2313     return false;
2314   }
2315   size_t result() { return _used; }
2316 };
2317 
2318 size_t G1CollectedHeap::recalculate_used() const {
2319   double recalculate_used_start = os::elapsedTime();
2320 
2321   SumUsedClosure blk;
2322   heap_region_iterate(&blk);
2323 
2324   g1_policy()->phase_times()->record_evac_fail_recalc_used_time((os::elapsedTime() - recalculate_used_start) * 1000.0);
2325   return blk.result();
2326 }
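For context on the function above: recalculate_used() walks every heap region with a SumUsedClosure and records the elapsed walk time as the evacuation-failure recalculation cost. The hunk starts mid-class, so only the tail of the closure is visible; a minimal sketch of the full pattern, assuming the HeapRegionClosure interface of this HotSpot version (doHeapRegion() returning false to keep iterating), would look roughly like this. It is an illustration, not the file's actual class body.

class SumUsedClosure : public HeapRegionClosure {
  size_t _used;
public:
  SumUsedClosure() : _used(0) {}
  bool doHeapRegion(HeapRegion* r) {
    _used += r->used();   // accumulate the bytes in use in this region
    return false;         // false == continue iterating over the remaining regions
  }
  size_t result() { return _used; }
};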
2327 
2328 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
2329   switch (cause) {
2330     case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
2331     case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
2332     case GCCause::_dcmd_gc_run:             return ExplicitGCInvokesConcurrent;
2333     case GCCause::_g1_humongous_allocation: return true;
2334     case GCCause::_update_allocation_context_stats_inc: return true;
2335     default:                                return false;
2336   }
2337 }
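The new case in this hunk makes GCCause::_dcmd_gc_run (the cause recorded when a collection is triggered via the jcmd GC.run diagnostic command) honor ExplicitGCInvokesConcurrent exactly like System.gc(), and the matching hunk above swaps the Full GC assert to GCCause::is_user_requested_gc() so both causes satisfy the same invariant. As a hedged sketch (the actual helper lives in gcCause.hpp and is not part of this webrev), the predicate presumably groups exactly those two causes:

// Presumed shape of the helper consulted by the updated assert; an
// assumption for illustration, not copied from this change.
static bool is_user_requested_gc(GCCause::Cause cause) {
  return cause == GCCause::_java_lang_system_gc   // System.gc()
      || cause == GCCause::_dcmd_gc_run;          // jcmd <pid> GC.run
}

Routing both the assert and the switch through GCCause keeps the two call sites in agreement about what counts as a user-requested collection.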
2338 
2339 #ifndef PRODUCT
2340 void G1CollectedHeap::allocate_dummy_regions() {
2341   // Let's fill up most of the region
2342   size_t word_size = HeapRegion::GrainWords - 1024;
2343   // And as a result the region we'll allocate will be humongous.
2344   guarantee(isHumongous(word_size), "sanity");
2345 
2346   for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
2347     // Let's use the existing mechanism for the allocation
2348     HeapWord* dummy_obj = humongous_obj_allocate(word_size,
2349                                                  AllocationContext::system());
2350     if (dummy_obj != NULL) {
2351       MemRegion mr(dummy_obj, word_size);
2352       CollectedHeap::fill_with_object(mr);

