src/share/vm/gc/g1/g1CollectedHeap.cpp

Old (before the change):

1166   SvcGCMarker sgcm(SvcGCMarker::FULL);
1167   ResourceMark rm;
1168 
1169   G1Log::update_level();
1170   print_heap_before_gc();
1171   trace_heap_before_gc(gc_tracer);
1172 
1173   size_t metadata_prev_used = MetaspaceAux::used_bytes();
1174 
1175   verify_region_sets_optional();
1176 
1177   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1178                            collector_policy()->should_clear_all_soft_refs();
1179 
1180   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
1181 
1182   {
1183     IsGCActiveMark x;
1184 
1185     // Timing
1186     assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant");
1187     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
1188 
1189     {
1190       GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL, gc_tracer->gc_id());
1191       TraceCollectorStats tcs(g1mm()->full_collection_counters());
1192       TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1193 
1194       g1_policy()->record_full_collection_start();
1195 
1196       // Note: When we have a more flexible GC logging framework that
1197       // allows us to add optional attributes to a GC log record we
1198       // could consider timing and reporting how long we wait in the
1199       // following two methods.
1200       wait_while_free_regions_coming();
1201       // If we start the compaction before the CM threads finish
1202       // scanning the root regions we might trip them over as we'll
1203       // be moving objects / updating references. So let's wait until
1204       // they are done. By telling them to abort, they should complete
1205       // early.
1206       _cm->root_regions()->abort();
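
The comment at lines 1196-1205 describes a handshake: the concurrent-mark root-region scan is told to abort so the upcoming compaction does not move objects out from under the scanning threads, and the collector then waits for the scan to finish (the corresponding wait presumably follows just past the end of this hunk). A minimal sketch of that abort-then-wait pattern, using standard C++ primitives rather than G1's actual CMRootRegions class; every name below is illustrative only:

    #include <atomic>
    #include <condition_variable>
    #include <mutex>

    // Illustrative abort-then-wait handshake; not the real CMRootRegions code.
    class RootRegionScan {
      std::atomic<bool>       _should_abort{false};
      bool                    _scan_in_progress{false};
      std::mutex              _lock;
      std::condition_variable _done;

    public:
      // Marking thread: scan root regions until done or until told to abort early.
      template <typename Iter>
      void scan(Iter first, Iter last) {
        { std::lock_guard<std::mutex> g(_lock); _scan_in_progress = true; }
        for (Iter r = first; r != last && !_should_abort.load(); ++r) {
          // ... scan one root region ...
        }
        { std::lock_guard<std::mutex> g(_lock); _scan_in_progress = false; }
        _done.notify_all();
      }

      // Full-GC thread: ask the scanners to wrap up early ...
      void abort() { _should_abort.store(true); }

      // ... and block until they actually have.
      void wait_until_scan_finished() {
        std::unique_lock<std::mutex> g(_lock);
        _done.wait(g, [this] { return !_scan_in_progress; });
      }
    };

HotSpot itself uses its own Monitor/Mutex primitives rather than the standard library; the sketch only illustrates the protocol the comment relies on.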


2182     }
2183     return false;
2184   }
2185   size_t result() { return _used; }
2186 };
2187 
2188 size_t G1CollectedHeap::recalculate_used() const {
2189   double recalculate_used_start = os::elapsedTime();
2190 
2191   SumUsedClosure blk;
2192   heap_region_iterate(&blk);
2193 
2194   g1_policy()->phase_times()->record_evac_fail_recalc_used_time((os::elapsedTime() - recalculate_used_start) * 1000.0);
2195   return blk.result();
2196 }
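
recalculate_used() sums the used bytes of every region by handing a HeapRegionClosure to heap_region_iterate(); only the tail of SumUsedClosure (a closing brace, the return false, and result()) falls inside this hunk. A sketch of the presumed full closure, for context; the field, the constructor and the guard whose closing brace shows up at line 2182 are assumptions, since they lie outside the hunk:

    // Presumed shape of SumUsedClosure; only its last few lines are visible above.
    class SumUsedClosure : public HeapRegionClosure {
      size_t _used;
    public:
      SumUsedClosure() : _used(0) {}
      bool doHeapRegion(HeapRegion* r) {
        if (!r->is_continues_humongous()) {  // assumed: the block closing at line 2182
          _used += r->used();
        }
        return false;  // false = keep iterating over the remaining regions
      }
      size_t result() { return _used; }
    };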
2197 
2198 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
2199   switch (cause) {
2200     case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
2201     case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;

2202     case GCCause::_g1_humongous_allocation: return true;
2203     case GCCause::_update_allocation_context_stats_inc: return true;
2204     case GCCause::_wb_conc_mark:            return true;
2205     default:                                return false;
2206   }
2207 }
2208 
2209 #ifndef PRODUCT
2210 void G1CollectedHeap::allocate_dummy_regions() {
2211   // Let's fill up most of the region
2212   size_t word_size = HeapRegion::GrainWords - 1024;
2213   // And as a result the region we'll allocate will be humongous.
2214   guarantee(is_humongous(word_size), "sanity");
2215 
2216   for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
2217     // Let's use the existing mechanism for the allocation
2218     HeapWord* dummy_obj = humongous_obj_allocate(word_size,
2219                                                  AllocationContext::system());
2220     if (dummy_obj != NULL) {
2221       MemRegion mr(dummy_obj, word_size);


New (with the change applied):

1166   SvcGCMarker sgcm(SvcGCMarker::FULL);
1167   ResourceMark rm;
1168 
1169   G1Log::update_level();
1170   print_heap_before_gc();
1171   trace_heap_before_gc(gc_tracer);
1172 
1173   size_t metadata_prev_used = MetaspaceAux::used_bytes();
1174 
1175   verify_region_sets_optional();
1176 
1177   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1178                            collector_policy()->should_clear_all_soft_refs();
1179 
1180   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
1181 
1182   {
1183     IsGCActiveMark x;
1184 
1185     // Timing
1186     assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
1187     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
1188 
1189     {
1190       GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL, gc_tracer->gc_id());
1191       TraceCollectorStats tcs(g1mm()->full_collection_counters());
1192       TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1193 
1194       g1_policy()->record_full_collection_start();
1195 
1196       // Note: When we have a more flexible GC logging framework that
1197       // allows us to add optional attributes to a GC log record we
1198       // could consider timing and reporting how long we wait in the
1199       // following two methods.
1200       wait_while_free_regions_coming();
1201       // If we start the compaction before the CM threads finish
1202       // scanning the root regions we might trip them over as we'll
1203       // be moving objects / updating references. So let's wait until
1204       // they are done. By telling them to abort, they should complete
1205       // early.
1206       _cm->root_regions()->abort();
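
The only difference from the old version of this hunk is the assertion at line 1186: the check against GCCause::_java_lang_system_gc is generalized to GCCause::is_user_requested_gc(), so that every user-requested cause, not just System.gc(), has to arrive through the explicit-GC path. Presumably this covers _dcmd_gc_run, the cause behind jcmd <pid> GC.run, whose handling is the other half of this change (see line 2202 below). The predicate itself is defined with GCCause and is not part of this webrev; the following is an assumed sketch of what it checks, not the actual definition:

    // Assumed shape of the predicate used by the new assert; the real
    // definition lives with GCCause and is not shown in this webrev.
    static bool is_user_requested_gc(GCCause::Cause cause) {
      return cause == GCCause::_java_lang_system_gc   // System.gc()
          || cause == GCCause::_dcmd_gc_run;          // jcmd <pid> GC.run
    }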


2182     }
2183     return false;
2184   }
2185   size_t result() { return _used; }
2186 };
2187 
2188 size_t G1CollectedHeap::recalculate_used() const {
2189   double recalculate_used_start = os::elapsedTime();
2190 
2191   SumUsedClosure blk;
2192   heap_region_iterate(&blk);
2193 
2194   g1_policy()->phase_times()->record_evac_fail_recalc_used_time((os::elapsedTime() - recalculate_used_start) * 1000.0);
2195   return blk.result();
2196 }
2197 
2198 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
2199   switch (cause) {
2200     case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
2201     case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
2202     case GCCause::_dcmd_gc_run:             return ExplicitGCInvokesConcurrent;
2203     case GCCause::_g1_humongous_allocation: return true;
2204     case GCCause::_update_allocation_context_stats_inc: return true;
2205     case GCCause::_wb_conc_mark:            return true;
2206     default:                                return false;
2207   }
2208 }
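
The added case at line 2202 makes a Diagnostic Command collection (_dcmd_gc_run, i.e. jcmd <pid> GC.run) respect ExplicitGCInvokesConcurrent the same way System.gc() already does: when the flag is set, G1 starts a concurrent marking cycle instead of a stop-the-world full collection. A simplified sketch of how a caller such as G1CollectedHeap::collect() typically consumes this predicate; the helper names are hypothetical, and the real method also deals with retries, GC counts and the GC locker:

    // Hypothetical helpers, declared only so the sketch is self-contained.
    void schedule_initial_mark_pause(G1CollectedHeap* g1h, GCCause::Cause cause);
    void do_full_stop_the_world_collection(G1CollectedHeap* g1h, GCCause::Cause cause);

    // Simplified, hypothetical caller of should_do_concurrent_full_gc();
    // not the actual body of G1CollectedHeap::collect().
    void collect_sketch(G1CollectedHeap* g1h, GCCause::Cause cause) {
      if (g1h->should_do_concurrent_full_gc(cause)) {
        // Request an initial-mark evacuation pause, which kicks off a
        // concurrent marking cycle rather than a full, stop-the-world GC.
        schedule_initial_mark_pause(g1h, cause);
      } else {
        // Fall back to a stop-the-world full collection.
        do_full_stop_the_world_collection(g1h, cause);
      }
    }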
2209 
2210 #ifndef PRODUCT
2211 void G1CollectedHeap::allocate_dummy_regions() {
2212   // Let's fill up most of the region
2213   size_t word_size = HeapRegion::GrainWords - 1024;
2214   // And as a result the region we'll allocate will be humongous.
2215   guarantee(is_humongous(word_size), "sanity");
2216 
2217   for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
2218     // Let's use the existing mechanism for the allocation
2219     HeapWord* dummy_obj = humongous_obj_allocate(word_size,
2220                                                  AllocationContext::system());
2221     if (dummy_obj != NULL) {
2222       MemRegion mr(dummy_obj, word_size);

