  SvcGCMarker sgcm(SvcGCMarker::FULL);
  ResourceMark rm;

  print_heap_before_gc();
  trace_heap_before_gc(gc_tracer);

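  // Captured before the collection so that the metaspace usage delta
  // can be reported once the full GC has completed.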
  size_t metadata_prev_used = MetaspaceAux::used_bytes();

  verify_region_sets_optional();

  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                          collector_policy()->should_clear_all_soft_refs();

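  // ClearedAllSoftRefs is an RAII helper: if we did clear all soft
  // references, it notifies the collector policy of that fact when this
  // scope is exited.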
  ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());

  {
    IsGCActiveMark x;

    // Timing
    assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
    TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);

    {
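      // The objects constructed below are scoped tracers: they record
      // their start times and counters now and emit their log output
      // and stats when this block is exited.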
      GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL, gc_tracer->gc_id());
      TraceCollectorStats tcs(g1mm()->full_collection_counters());
      TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());

      g1_policy()->record_full_collection_start();

      // Note: When we have a more flexible GC logging framework that
      // allows us to add optional attributes to a GC log record we
      // could consider timing and reporting how long we wait in the
      // following two methods.
      wait_while_free_regions_coming();
      // If we start the compaction before the CM threads finish
      // scanning the root regions we might trip them over as we'll
      // be moving objects / updating references. So let's wait until
      // they are done. By telling them to abort, they should complete
      // early.
      _cm->root_regions()->abort();
// ... (intervening lines elided; the excerpt resumes in the tail of SumUsedClosure) ...

    }
    return false;
  }
  size_t result() { return _used; }
};

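// Recomputes the heap's used-byte count from scratch by visiting every
// region; the elapsed time is recorded in the phase times under the
// evacuation-failure "recalculate used" slot.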
size_t G1CollectedHeap::recalculate_used() const {
  double recalculate_used_start = os::elapsedTime();

  SumUsedClosure blk;
  heap_region_iterate(&blk);

  g1_policy()->phase_times()->record_evac_fail_recalc_used_time((os::elapsedTime() - recalculate_used_start) * 1000.0);
  return blk.result();
}

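// Decides whether a requested collection should be satisfied by starting
// a concurrent marking cycle instead of a stop-the-world full GC. For
// GC-locker- and user-requested collections (System.gc(), jcmd GC.run)
// this is governed by the GCLockerInvokesConcurrent and
// ExplicitGCInvokesConcurrent flags respectively; humongous-allocation,
// allocation-context-stats and whitebox causes always start a
// concurrent cycle.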
bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  switch (cause) {
    case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
    case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
    case GCCause::_dcmd_gc_run:             return ExplicitGCInvokesConcurrent;
    case GCCause::_g1_humongous_allocation: return true;
    case GCCause::_update_allocation_context_stats_inc: return true;
    case GCCause::_wb_conc_mark:            return true;
    default:                                return false;
  }
}

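// Debug-only (non-PRODUCT) helper driven by the develop flag
// G1DummyRegionsPerGC: it deliberately burns heap space by allocating
// and filling humongous dummy objects, so that GC behavior under
// artificial allocation pressure can be exercised in testing.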
#ifndef PRODUCT
void G1CollectedHeap::allocate_dummy_regions() {
  // Let's fill up most of the region
  size_t word_size = HeapRegion::GrainWords - 1024;
  // And as a result the region we'll allocate will be humongous.
  guarantee(is_humongous(word_size), "sanity");

  for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
    // Let's use the existing mechanism for the allocation
    HeapWord* dummy_obj = humongous_obj_allocate(word_size,
                                                 AllocationContext::system());
    if (dummy_obj != NULL) {
      MemRegion mr(dummy_obj, word_size);
      // Fill the region with a dummy object so the heap stays parseable.
      CollectedHeap::fill_with_object(mr);
    } else {
      // If we can't allocate once, we probably cannot allocate
      // again. Let's get out of the loop.
      break;
    }
  }
}
#endif // !PRODUCT