// NOTE(review): extraction artifact — each line below carries a baked-in
// source line number (e.g. "1280"), and this span is the interior of a
// larger function (presumably G1's full-collection entry path in
// g1CollectedHeap.cpp — confirm against the enclosing definition).
// Braces opened here are closed outside this view.
//
// Scoped markers for the duration of the full GC: a serviceability GC
// marker (FULL kind — name-based, confirm) and a resource-area mark.
1280 SvcGCMarker sgcm(SvcGCMarker::FULL);
1281 ResourceMark rm;
1282
// Log/trace the heap state before collecting.
1283 print_heap_before_gc();
1284 trace_heap_before_gc(gc_tracer);
1285
// Snapshot metaspace usage — presumably so the post-GC delta can be
// reported later (the reporting side is outside this view).
1286 size_t metadata_prev_used = MetaspaceAux::used_bytes();
1287
1288 verify_region_sets_optional();
1289
// Clear all soft references if the caller requested it OR the collector
// policy demands it for this collection.
1290 const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1291 collector_policy()->should_clear_all_soft_refs();
1292
// RAII guard — presumably records, when it goes out of scope, whether
// soft refs were cleared by this collection; verify against its dtor.
1293 ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
1294
1295 {
// Scoped flag marking a GC as active for this block (name-based).
1296 IsGCActiveMark x;
1297
1298 // Timing
// A System.gc()-caused collection must have been flagged explicit.
1299 assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant");
// Emit an optional date stamp on the GC log line, then measure CPU time
// for the collection when "finer" G1 logging is enabled.
1300 gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
1301 TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
1302
1303 {
// Scoped trace/stat collectors for the "Full GC" event: timed trace
// record, full-collection perf counters, and memory-manager stats.
1304 GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL, gc_tracer->gc_id())
1305 TraceCollectorStats tcs(g1mm()->full_collection_counters());
1306 TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1307
1308 double start = os::elapsedTime();
1309 g1_policy()->record_full_collection_start();
1310
1311 // Note: When we have a more flexible GC logging framework that
1312 // allows us to add optional attributes to a GC log record we
1313 // could consider timing and reporting how long we wait in the
1314 // following two methods.
1315 wait_while_free_regions_coming();
1316 // If we start the compaction before the CM threads finish
1317 // scanning the root regions we might trip them over as we'll
1318 // be moving objects / updating references. So let's wait until
1319 // they are done. By telling them to abort, they should complete
1320 // early.
|
// NOTE(review): extraction artifact — this is a second, near-identical
// copy of the preceding fragment (a side-by-side diff dump); it differs
// only in that the gclog_or_tty->date_stamp(...) call is absent here.
// Each line carries a baked-in source line number, and the span is the
// interior of a larger function whose boundaries lie outside this view.
//
// Scoped markers for the duration of the full GC: a serviceability GC
// marker (FULL kind — name-based, confirm) and a resource-area mark.
1280 SvcGCMarker sgcm(SvcGCMarker::FULL);
1281 ResourceMark rm;
1282
// Log/trace the heap state before collecting.
1283 print_heap_before_gc();
1284 trace_heap_before_gc(gc_tracer);
1285
// Snapshot metaspace usage — presumably so the post-GC delta can be
// reported later (the reporting side is outside this view).
1286 size_t metadata_prev_used = MetaspaceAux::used_bytes();
1287
1288 verify_region_sets_optional();
1289
// Clear all soft references if the caller requested it OR the collector
// policy demands it for this collection.
1290 const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1291 collector_policy()->should_clear_all_soft_refs();
1292
// RAII guard — presumably records, when it goes out of scope, whether
// soft refs were cleared by this collection; verify against its dtor.
1293 ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
1294
1295 {
// Scoped flag marking a GC as active for this block (name-based).
1296 IsGCActiveMark x;
1297
1298 // Timing
// A System.gc()-caused collection must have been flagged explicit.
1299 assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant");
// Measure CPU time for the collection when "finer" G1 logging is on.
// (No date_stamp call in this copy — see the note at the top.)
1300 TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
1301
1302 {
// Scoped trace/stat collectors for the "Full GC" event: timed trace
// record, full-collection perf counters, and memory-manager stats.
1303 GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL, gc_tracer->gc_id())
1304 TraceCollectorStats tcs(g1mm()->full_collection_counters());
1305 TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1306
1307 double start = os::elapsedTime();
1308 g1_policy()->record_full_collection_start();
1309
1310 // Note: When we have a more flexible GC logging framework that
1311 // allows us to add optional attributes to a GC log record we
1312 // could consider timing and reporting how long we wait in the
1313 // following two methods.
1314 wait_while_free_regions_coming();
1315 // If we start the compaction before the CM threads finish
1316 // scanning the root regions we might trip them over as we'll
1317 // be moving objects / updating references. So let's wait until
1318 // they are done. By telling them to abort, they should complete
1319 // early.
|