}

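  // Set up the timer/tracer pair used for G1's serial full collection so the
  // pause is reported through unified GC logging and the GC event framework.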
  STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
  gc_timer->register_gc_start();

  SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
  GCIdMark gc_id_mark;
  gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());

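  // Notify serviceability tools that a full collection is in progress, and
  // bound any resource-area allocations made during the pause.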
  SvcGCMarker sgcm(SvcGCMarker::FULL);
  ResourceMark rm;

  print_heap_before_gc();
  print_heap_regions();
  trace_heap_before_gc(gc_tracer);

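  // Capture metaspace usage up front so the before/after transition can be
  // reported once the collection is done.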
  size_t metadata_prev_used = MetaspaceAux::used_bytes();

  _verifier->verify_region_sets_optional();

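  // Soft references are cleared either on explicit request or when the
  // collector policy demands it (typically when the VM is close to running
  // out of memory). ClearedAllSoftRefs records at scope exit that the
  // request was honored.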
  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                                      collector_policy()->should_clear_all_soft_refs();

  ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());

  {
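    // Record that a GC is in progress for the duration of this scope.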
    IsGCActiveMark x;

    // Timing
    assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
    GCTraceCPUTime tcpu;

    {
      GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
      TraceCollectorStats tcs(g1mm()->full_collection_counters());
      TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());

      G1HeapTransition heap_transition(this);
      g1_policy()->record_full_collection_start();

      // Note: When we have a more flexible GC logging framework that
      // allows us to add optional attributes to a GC log record we
      // could consider timing and reporting how long we wait in the
      // following two methods.
      wait_while_free_regions_coming();

      // ... (elided) ...

      // We may have added regions to the current incremental collection
      // set between the last GC or pause and now. We need to clear the
      // incremental collection set and then start rebuilding it afresh
      // after this full GC.
      abandon_collection_set(collection_set());

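      // Release the region set bookkeeping (not just the free list) so the
      // serial mark-sweep can treat the heap uniformly; the sets are rebuilt
      // from the compacted heap further down.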
      tear_down_region_sets(false /* free_list_only */);
      collector_state()->set_gcs_are_young(true);

      // See the comments in g1CollectedHeap.hpp and
      // G1CollectedHeap::ref_processing_init() about
      // how reference processing currently works in G1.

      // Temporarily make discovery by the STW ref processor single threaded (non-MT).
      ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);

      // Temporarily clear the STW ref processor's _is_alive_non_header field.
      ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);

      ref_processor_stw()->enable_discovery();
      ref_processor_stw()->setup_policy(do_clear_all_soft_refs);

      // Do collection work
      {
        HandleMark hm; // Discard invalid handles created during gc
        G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
      }

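      // The serial mark-sweep compacts within the existing regions and frees
      // none itself; rebuild the region sets, including the free list, to
      // match the compacted heap.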
      assert(num_free_regions() == 0, "we should not have added any free regions");
      rebuild_region_sets(false /* free_list_only */);

      // Enqueue any discovered reference objects that have
      // not been removed from the discovered lists.
      ref_processor_stw()->enqueue_discovered_references();

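      // Compiled code may hold derived pointers (interior pointers computed
      // from an object's base address). Now that objects have moved, rebase
      // them from the base pointers recorded during the pause.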
#if defined(COMPILER2) || INCLUDE_JVMCI
      DerivedPointerTable::update_pointers();
#endif

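      // Refresh the usage statistics exposed through the java.lang.management
      // memory pools and managers.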
      MemoryService::track_memory_usage();

      assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
      ref_processor_stw()->verify_no_references_recorded();

      // Delete metaspaces for unloaded class loaders and clean up loader_data graph
      ClassLoaderDataGraph::purge();

// ... (elided: the remainder of do_full_collection and intervening code; the
// excerpt resumes mid-way through G1CollectedHeap::satisfy_failed_allocation) ...

                                            true,  /* clear_all_soft_refs */
                                            true,  /* expect_null_mutator_alloc_region */
                                            succeeded);

  if (result != NULL || !*succeeded) {
    return result;
  }

  // Attempts to allocate, no GC
  result = satisfy_failed_allocation_helper(word_size,
                                            context,
                                            false, /* do_gc */
                                            false, /* clear_all_soft_refs */
                                            true,  /* expect_null_mutator_alloc_region */
                                            succeeded);

  if (result != NULL) {
    assert(*succeeded, "sanity");
    return result;
  }

  assert(!collector_policy()->should_clear_all_soft_refs(),
         "Flag should have been handled and cleared prior to this point");

  // What else? We might try synchronous finalization later. If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
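  // Returning NULL with *succeeded == true means the allocation failed even
  // after a maximally compacting collection; the caller will then throw
  // OutOfMemoryError.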
  assert(*succeeded, "sanity");
  return NULL;
}

// Attempts to expand the heap sufficiently to support an allocation of the
// given "word_size". If successful, performs the allocation and returns the
// address of the allocated block, or else NULL.

HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) {
  assert_at_safepoint(true /* should_be_vm_thread */);

  _verifier->verify_region_sets_optional();

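  // Expand by at least enough to cover the request, but never by less than
  // MinHeapDeltaBytes.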
  size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);