18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/metadataOnStackMark.hpp"
27 #include "classfile/stringTable.hpp"
28 #include "code/codeCache.hpp"
29 #include "code/icBuffer.hpp"
30 #include "gc/g1/bufferingOopClosure.hpp"
31 #include "gc/g1/concurrentG1Refine.hpp"
32 #include "gc/g1/concurrentG1RefineThread.hpp"
33 #include "gc/g1/concurrentMarkThread.inline.hpp"
34 #include "gc/g1/g1Allocator.inline.hpp"
35 #include "gc/g1/g1CollectedHeap.inline.hpp"
36 #include "gc/g1/g1CollectorPolicy.hpp"
37 #include "gc/g1/g1CollectorState.hpp"
38 #include "gc/g1/g1ErgoVerbose.hpp"
39 #include "gc/g1/g1EvacFailure.hpp"
40 #include "gc/g1/g1GCPhaseTimes.hpp"
41 #include "gc/g1/g1Log.hpp"
42 #include "gc/g1/g1MarkSweep.hpp"
43 #include "gc/g1/g1OopClosures.inline.hpp"
44 #include "gc/g1/g1ParScanThreadState.inline.hpp"
45 #include "gc/g1/g1RegionToSpaceMapper.hpp"
46 #include "gc/g1/g1RemSet.inline.hpp"
47 #include "gc/g1/g1RootClosures.hpp"
48 #include "gc/g1/g1RootProcessor.hpp"
49 #include "gc/g1/g1StringDedup.hpp"
50 #include "gc/g1/g1YCTypes.hpp"
51 #include "gc/g1/heapRegion.inline.hpp"
52 #include "gc/g1/heapRegionRemSet.hpp"
53 #include "gc/g1/heapRegionSet.inline.hpp"
54 #include "gc/g1/suspendibleThreadSet.hpp"
55 #include "gc/g1/vm_operations_g1.hpp"
56 #include "gc/shared/gcHeapSummary.hpp"
57 #include "gc/shared/gcId.hpp"
58 #include "gc/shared/gcLocker.inline.hpp"
59 #include "gc/shared/gcTimer.hpp"
60 #include "gc/shared/gcTrace.hpp"
61 #include "gc/shared/gcTraceTime.hpp"
62 #include "gc/shared/generationSpec.hpp"
63 #include "gc/shared/isGCActiveMark.hpp"
64 #include "gc/shared/referenceProcessor.hpp"
65 #include "gc/shared/taskqueue.inline.hpp"
66 #include "memory/allocation.hpp"
67 #include "memory/iterator.hpp"
68 #include "oops/oop.inline.hpp"
69 #include "runtime/atomic.inline.hpp"
70 #include "runtime/init.hpp"
71 #include "runtime/orderAccess.inline.hpp"
72 #include "runtime/vmThread.hpp"
73 #include "utilities/globalDefinitions.hpp"
74 #include "utilities/stack.inline.hpp"
75
76 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
77
78 // INVARIANTS/NOTES
79 //
80 // All allocation activity covered by the G1CollectedHeap interface is
81 // serialized by acquiring the Heap_lock. This happens in mem_allocate
82 // and allocate_new_tlab, which are the "entry" points to the
83 // allocation code from the rest of the JVM. (Note that this does not
84 // apply to TLAB allocation, which is not part of this interface: it
85 // is done by clients of this interface.)
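//
// As an illustrative sketch (hypothetical helper names; the real paths
// also handle retries, pending GC requests and humongous objects), the
// discipline described above amounts to:
//
//   HeapWord* result = attempt_allocation_fast(word_size);   // lock-free fast path (hypothetical helper)
//   if (result == NULL) {
//     MutexLockerEx ml(Heap_lock);                           // serialize the slow path
//     result = attempt_allocation_slow_locked(word_size);    // hypothetical helper
//   }
//
// TLAB refills come in through allocate_new_tlab(); allocation *within*
// an already-handed-out TLAB never takes the Heap_lock at all.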
187 } while (hr != head);
188 assert(hr != NULL, "invariant");
189 hr->set_next_dirty_cards_region(NULL);
190 return hr;
191 }
192
193 // Returns true if the reference points to an object that
194 // can move in an incremental collection.
195 bool G1CollectedHeap::is_scavengable(const void* p) {
196 HeapRegion* hr = heap_region_containing(p);
197 return !hr->is_pinned();
198 }
199
200 // Private methods.
201
202 HeapRegion*
203 G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
204 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
205 while (!_secondary_free_list.is_empty() || free_regions_coming()) {
206 if (!_secondary_free_list.is_empty()) {
207 if (G1ConcRegionFreeingVerbose) {
208 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
209 "secondary_free_list has %u entries",
210 _secondary_free_list.length());
211 }
212 // It looks as if there are free regions available on the
213 // secondary_free_list. Let's move them to the free_list and try
214 // again to allocate from it.
215 append_secondary_free_list();
216
217 assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not "
218 "empty we should have moved at least one entry to the free_list");
219 HeapRegion* res = _hrm.allocate_free_region(is_old);
220 if (G1ConcRegionFreeingVerbose) {
221 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
222 "allocated " HR_FORMAT " from secondary_free_list",
223 HR_FORMAT_PARAMS(res));
224 }
225 return res;
226 }
227
228 // Wait here until we get notified either when (a) there are no
229     // more free regions coming or (b) some regions have been moved onto
230 // the secondary_free_list.
231 SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
232 }
233
234 if (G1ConcRegionFreeingVerbose) {
235 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
236 "could not allocate from secondary_free_list");
237 }
238 return NULL;
239 }
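//
// For context, the producing side of this handshake runs in the concurrent
// cleanup code. A hedged sketch of it (names are illustrative; the exact
// helper and call site differ):
//
//   {
//     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
//     secondary_free_list_add(&cleanup_list);   // hypothetical append helper
//     SecondaryFreeList_lock->notify_all();     // wakes the wait() in the loop above
//   }
//
// free_regions_coming() (tested in the loop above) is set before cleanup
// starts freeing regions and cleared once it is done, so a waiter that sees
// it cleared and an empty secondary_free_list can safely give up.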
240
241 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
242 assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
243 "the only time we use this to allocate a humongous region is "
244 "when we are allocating a single humongous region");
245
246 HeapRegion* res;
247 if (G1StressConcRegionFreeing) {
248 if (!_secondary_free_list.is_empty()) {
249 if (G1ConcRegionFreeingVerbose) {
250 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
251 "forced to look at the secondary_free_list");
252 }
253 res = new_region_try_secondary_free_list(is_old);
254 if (res != NULL) {
255 return res;
256 }
257 }
258 }
259
260 res = _hrm.allocate_free_region(is_old);
261
262 if (res == NULL) {
263 if (G1ConcRegionFreeingVerbose) {
264 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
265 "res == NULL, trying the secondary_free_list");
266 }
267 res = new_region_try_secondary_free_list(is_old);
268 }
269 if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
270 // Currently, only attempts to allocate GC alloc regions set
271 // do_expand to true. So, we should only reach here during a
272 // safepoint. If this assumption changes we might have to
273 // reconsider the use of _expand_heap_after_alloc_failure.
274 assert(SafepointSynchronize::is_at_safepoint(), "invariant");
275
276 ergo_verbose1(ErgoHeapSizing,
277 "attempt heap expansion",
278 ergo_format_reason("region allocation request failed")
279 ergo_format_byte("allocation request"),
280 word_size * HeapWordSize);
281 if (expand(word_size * HeapWordSize)) {
282 // Given that expand() succeeded in expanding the heap, and we
283 // always expand the heap by an amount aligned to the heap
284 // region size, the free list should in theory not be empty.
285 // In either case allocate_free_region() will check for NULL.
286 res = _hrm.allocate_free_region(is_old);
287 } else {
288 _expand_heap_after_alloc_failure = false;
289 }
290 }
291 return res;
292 }
293
294 HeapWord*
295 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
296 uint num_regions,
297 size_t word_size,
298 AllocationContext_t context) {
299 assert(first != G1_NO_HRM_INDEX, "pre-condition");
300 assert(is_humongous(word_size), "word_size should be humongous");
431 // potentially waits for regions from the secondary free list.
432 wait_while_free_regions_coming();
433 append_secondary_free_list_if_not_empty_with_lock();
434
435     // Policy: Try only empty (i.e. already committed) regions first. Maybe we
436 // are lucky enough to find some.
437 first = _hrm.find_contiguous_only_empty(obj_regions);
438 if (first != G1_NO_HRM_INDEX) {
439 _hrm.allocate_free_regions_starting_at(first, obj_regions);
440 }
441 }
442
443 if (first == G1_NO_HRM_INDEX) {
444 // Policy: We could not find enough regions for the humongous object in the
445     // free list. Look through the heap for a mix of free and uncommitted regions.
446     // If such a range exists, try expansion.
447 first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
448 if (first != G1_NO_HRM_INDEX) {
449 // We found something. Make sure these regions are committed, i.e. expand
450 // the heap. Alternatively we could do a defragmentation GC.
451 ergo_verbose1(ErgoHeapSizing,
452 "attempt heap expansion",
453 ergo_format_reason("humongous allocation request failed")
454 ergo_format_byte("allocation request"),
455 word_size * HeapWordSize);
456
457 _hrm.expand_at(first, obj_regions);
458 g1_policy()->record_new_heap_size(num_regions());
459
460 #ifdef ASSERT
461 for (uint i = first; i < first + obj_regions; ++i) {
462 HeapRegion* hr = region_at(i);
463 assert(hr->is_free(), "sanity");
464 assert(hr->is_empty(), "sanity");
465 assert(is_on_master_free_list(hr), "sanity");
466 }
467 #endif
468 _hrm.allocate_free_regions_starting_at(first, obj_regions);
469 } else {
470 // Policy: Potentially trigger a defragmentation GC.
471 }
472 }
473
474 HeapWord* result = NULL;
475 if (first != G1_NO_HRM_INDEX) {
476 result = humongous_obj_allocate_initialize_regions(first, obj_regions,
754 HeapRegion* start_region = _hrm.addr_to_region(start_address);
755 if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
756 start_address = start_region->end();
757 if (start_address > last_address) {
758 increase_used(word_size * HeapWordSize);
759 start_region->set_top(last_address + 1);
760 continue;
761 }
762 start_region->set_top(start_address);
763 curr_range = MemRegion(start_address, last_address + 1);
764 start_region = _hrm.addr_to_region(start_address);
765 }
766
767 // Perform the actual region allocation, exiting if it fails.
768 // Then note how much new space we have allocated.
769 if (!_hrm.allocate_containing_regions(curr_range, &commits)) {
770 return false;
771 }
772 increase_used(word_size * HeapWordSize);
773 if (commits != 0) {
774 ergo_verbose1(ErgoHeapSizing,
775 "attempt heap expansion",
776 ergo_format_reason("allocate archive regions")
777 ergo_format_byte("total size"),
778 HeapRegion::GrainWords * HeapWordSize * commits);
779 }
780
781 // Mark each G1 region touched by the range as archive, add it to the old set,
782 // and set the allocation context and top.
783 HeapRegion* curr_region = _hrm.addr_to_region(start_address);
784 HeapRegion* last_region = _hrm.addr_to_region(last_address);
785 prev_last_region = last_region;
786
787 while (curr_region != NULL) {
788 assert(curr_region->is_empty() && !curr_region->is_pinned(),
789 "Region already in use (index %u)", curr_region->hrm_index());
790 _hr_printer.alloc(curr_region, G1HRPrinter::Archive);
791 curr_region->set_allocation_context(AllocationContext::system());
792 curr_region->set_archive();
793 _old_set.add(curr_region);
794 if (curr_region != last_region) {
795 curr_region->set_top(curr_region->end());
796 curr_region = _hrm.next_region_in_heap(curr_region);
797 } else {
798 curr_region->set_top(last_address + 1);
939 guarantee(curr_region->is_archive(),
940 "Expected archive region at index %u", curr_region->hrm_index());
941 uint curr_index = curr_region->hrm_index();
942 _old_set.remove(curr_region);
943 curr_region->set_free();
944 curr_region->set_top(curr_region->bottom());
945 if (curr_region != last_region) {
946 curr_region = _hrm.next_region_in_heap(curr_region);
947 } else {
948 curr_region = NULL;
949 }
950 _hrm.shrink_at(curr_index, 1);
951 uncommitted_regions++;
952 }
953
954 // Notify mark-sweep that this is no longer an archive range.
955 G1MarkSweep::set_range_archive(ranges[i], false);
956 }
957
958 if (uncommitted_regions != 0) {
959 ergo_verbose1(ErgoHeapSizing,
960 "attempt heap shrinking",
961 ergo_format_reason("uncommitted archive regions")
962 ergo_format_byte("total size"),
963 HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
964 }
965 decrease_used(size_used);
966 }
967
968 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
969 uint* gc_count_before_ret,
970 uint* gclocker_retry_count_ret) {
971 // The structure of this method has a lot of similarities to
972 // attempt_allocation_slow(). The reason these two were not merged
973 // into a single one is that such a method would require several "if
974 // allocation is not humongous do this, otherwise do that"
975 // conditional paths which would obscure its flow. In fact, an early
976 // version of this code did use a unified method which was harder to
977 // follow and, as a result, it had subtle bugs that were hard to
978 // track down. So keeping these two methods separate allows each to
979 // be more readable. It will be good to keep these two in sync as
980 // much as possible.
981
982 assert_heap_not_locked_and_not_at_safepoint();
1180 // We only generate output for non-empty regions.
1181 } else if (hr->is_starts_humongous()) {
1182 _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
1183 } else if (hr->is_continues_humongous()) {
1184 _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
1185 } else if (hr->is_archive()) {
1186 _hr_printer->post_compaction(hr, G1HRPrinter::Archive);
1187 } else if (hr->is_old()) {
1188 _hr_printer->post_compaction(hr, G1HRPrinter::Old);
1189 } else {
1190 ShouldNotReachHere();
1191 }
1192 return false;
1193 }
1194
1195 PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
1196 : _hr_printer(hr_printer) { }
1197 };
1198
1199 void G1CollectedHeap::print_hrm_post_compaction() {
1200 PostCompactionPrinterClosure cl(hr_printer());
1201 heap_region_iterate(&cl);
1202 }
1203
1204 bool G1CollectedHeap::do_collection(bool explicit_gc,
1205 bool clear_all_soft_refs,
1206 size_t word_size) {
1207 assert_at_safepoint(true /* should_be_vm_thread */);
1208
1209 if (GC_locker::check_active_before_gc()) {
1210 return false;
1211 }
1212
1213 STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
1214 gc_timer->register_gc_start();
1215
1216 SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
1217 GCIdMark gc_id_mark;
1218 gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
1219
1220 SvcGCMarker sgcm(SvcGCMarker::FULL);
1221 ResourceMark rm;
1222
1223 G1Log::update_level();
1224 print_heap_before_gc();
1225 trace_heap_before_gc(gc_tracer);
1226
1227 size_t metadata_prev_used = MetaspaceAux::used_bytes();
1228
1229 verify_region_sets_optional();
1230
1231 const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1232 collector_policy()->should_clear_all_soft_refs();
1233
1234 ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
1235
1236 {
1237 IsGCActiveMark x;
1238
1239 // Timing
1240 assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
1241 TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
1242
1243 {
1244 GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL);
1245 TraceCollectorStats tcs(g1mm()->full_collection_counters());
1246 TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1247
1248 g1_policy()->record_full_collection_start();
1249
1250 // Note: When we have a more flexible GC logging framework that
1251 // allows us to add optional attributes to a GC log record we
1252 // could consider timing and reporting how long we wait in the
1253 // following two methods.
1254 wait_while_free_regions_coming();
1255 // If we start the compaction before the CM threads finish
1256 // scanning the root regions we might trip them over as we'll
1257 // be moving objects / updating references. So let's wait until
1258 // they are done. By telling them to abort, they should complete
1259 // early.
1260 _cm->root_regions()->abort();
1261 _cm->root_regions()->wait_until_scan_finished();
1262 append_secondary_free_list_if_not_empty_with_lock();
1263
1264 gc_prologue(true);
1275 #if defined(COMPILER2) || INCLUDE_JVMCI
1276 DerivedPointerTable::clear();
1277 #endif
1278
1279 // Disable discovery and empty the discovered lists
1280 // for the CM ref processor.
1281 ref_processor_cm()->disable_discovery();
1282 ref_processor_cm()->abandon_partial_discovery();
1283 ref_processor_cm()->verify_no_references_recorded();
1284
1285 // Abandon current iterations of concurrent marking and concurrent
1286 // refinement, if any are in progress. We have to do this before
1287 // wait_until_scan_finished() below.
1288 concurrent_mark()->abort();
1289
1290 // Make sure we'll choose a new allocation region afterwards.
1291 _allocator->release_mutator_alloc_region();
1292 _allocator->abandon_gc_alloc_regions();
1293 g1_rem_set()->cleanupHRRS();
1294
1295 // We should call this after we retire any currently active alloc
1296 // regions so that all the ALLOC / RETIRE events are generated
1297 // before the start GC event.
1298 _hr_printer.start_gc(true /* full */, (size_t) total_collections());
1299
1300 // We may have added regions to the current incremental collection
1301 // set between the last GC or pause and now. We need to clear the
1302 // incremental collection set and then start rebuilding it afresh
1303 // after this full GC.
1304 abandon_collection_set(g1_policy()->inc_cset_head());
1305 g1_policy()->clear_incremental_cset();
1306 g1_policy()->stop_incremental_cset_building();
1307
1308 tear_down_region_sets(false /* free_list_only */);
1309 collector_state()->set_gcs_are_young(true);
1310
1311 // See the comments in g1CollectedHeap.hpp and
1312 // G1CollectedHeap::ref_processing_init() about
1313 // how reference processing currently works in G1.
1314
1315 // Temporarily make discovery by the STW ref processor single threaded (non-MT).
1316 ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
1317
1318 // Temporarily clear the STW ref processor's _is_alive_non_header field.
1319 ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
1347 ClassLoaderDataGraph::purge();
1348 MetaspaceAux::verify_metrics();
1349
1350 // Note: since we've just done a full GC, concurrent
1351 // marking is no longer active. Therefore we need not
1352 // re-enable reference discovery for the CM ref processor.
1353 // That will be done at the start of the next marking cycle.
1354 assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
1355 ref_processor_cm()->verify_no_references_recorded();
1356
1357 reset_gc_time_stamp();
1358 // Since everything potentially moved, we will clear all remembered
1359 // sets, and clear all cards. Later we will rebuild remembered
1360 // sets. We will also reset the GC time stamps of the regions.
1361 clear_rsets_post_compaction();
1362 check_gc_time_stamps();
1363
1364 // Resize the heap if necessary.
1365 resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
1366
1367 if (_hr_printer.is_active()) {
1368 // We should do this after we potentially resize the heap so
1369 // that all the COMMIT / UNCOMMIT events are generated before
1370 // the end GC event.
1371
1372 print_hrm_post_compaction();
1373 _hr_printer.end_gc(true /* full */, (size_t) total_collections());
1374 }
1375
1376 G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
1377 if (hot_card_cache->use_cache()) {
1378 hot_card_cache->reset_card_counts();
1379 hot_card_cache->reset_hot_cache();
1380 }
1381
1382 // Rebuild remembered sets of all regions.
1383 uint n_workers =
1384 AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
1385 workers()->active_workers(),
1386 Threads::number_of_non_daemon_threads());
1387 workers()->set_active_workers(n_workers);
1388
1389 ParRebuildRSTask rebuild_rs_task(this);
1390 workers()->run_task(&rebuild_rs_task);
1391
1392 // Rebuild the strong code root lists for each region
1393 rebuild_strong_code_roots();
1394
1423 // ConcurrentMark::abort() above since VerifyDuringGC verifies the
1424 // objects marked during a full GC against the previous bitmap.
1425 // But we need to clear it before calling check_bitmaps below since
1426 // the full GC has compacted objects and updated TAMS but not updated
1427 // the prev bitmap.
1428 if (G1VerifyBitmaps) {
1429 ((CMBitMap*) concurrent_mark()->prevMarkBitMap())->clearAll();
1430 }
1431 check_bitmaps("Full GC End");
1432
1433 // Start a new incremental collection set for the next pause
1434 assert(g1_policy()->collection_set() == NULL, "must be");
1435 g1_policy()->start_incremental_cset_building();
1436
1437 clear_cset_fast_test();
1438
1439 _allocator->init_mutator_alloc_region();
1440
1441 g1_policy()->record_full_collection_end();
1442
1443 if (G1Log::fine()) {
1444 g1_policy()->print_heap_transition();
1445 }
1446
1447 // We must call G1MonitoringSupport::update_sizes() in the same scoping level
1448 // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
1449 // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
1450 // before any GC notifications are raised.
1451 g1mm()->update_sizes();
1452
1453 gc_epilogue(true);
1454 }
1455
1456 if (G1Log::finer()) {
1457 g1_policy()->print_detailed_heap_transition(true /* full */);
1458 }
1459
1460 print_heap_after_gc();
1461 trace_heap_after_gc(gc_tracer);
1462
1463 post_full_gc_dump(gc_timer);
1464
1465 gc_timer->register_gc_end();
1466 gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1467 }
1468
1469 return true;
1470 }
1471
1472 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1473 // do_collection() will return whether it succeeded in performing
1474   // the GC. Currently, there is no facility in the
1475   // do_full_collection() API to notify the caller that the collection
1476 // did not succeed (e.g., because it was locked out by the GC
1477 // locker). So, right now, we'll ignore the return value.
1478 bool dummy = do_collection(true, /* explicit_gc */
1523
1524 // This assert only makes sense here, before we adjust them
1525 // with respect to the min and max heap size.
1526 assert(minimum_desired_capacity <= maximum_desired_capacity,
1527 "minimum_desired_capacity = " SIZE_FORMAT ", "
1528 "maximum_desired_capacity = " SIZE_FORMAT,
1529 minimum_desired_capacity, maximum_desired_capacity);
1530
1531 // Should not be greater than the heap max size. No need to adjust
1532 // it with respect to the heap min size as it's a lower bound (i.e.,
1533 // we'll try to make the capacity larger than it, not smaller).
1534 minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
1535 // Should not be less than the heap min size. No need to adjust it
1536 // with respect to the heap max size as it's an upper bound (i.e.,
1537 // we'll try to make the capacity smaller than it, not greater).
1538 maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size);
1539
1540 if (capacity_after_gc < minimum_desired_capacity) {
1541 // Don't expand unless it's significant
1542 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
1543 ergo_verbose4(ErgoHeapSizing,
1544 "attempt heap expansion",
1545 ergo_format_reason("capacity lower than "
1546 "min desired capacity after Full GC")
1547 ergo_format_byte("capacity")
1548 ergo_format_byte("occupancy")
1549 ergo_format_byte_perc("min desired capacity"),
1550 capacity_after_gc, used_after_gc,
1551 minimum_desired_capacity, (double) MinHeapFreeRatio);
1552 expand(expand_bytes);
1553
1554 // No expansion, now see if we want to shrink
1555 } else if (capacity_after_gc > maximum_desired_capacity) {
1556 // Capacity too large, compute shrinking size
1557 size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
1558 ergo_verbose4(ErgoHeapSizing,
1559 "attempt heap shrinking",
1560 ergo_format_reason("capacity higher than "
1561 "max desired capacity after Full GC")
1562 ergo_format_byte("capacity")
1563 ergo_format_byte("occupancy")
1564 ergo_format_byte_perc("max desired capacity"),
1565 capacity_after_gc, used_after_gc,
1566 maximum_desired_capacity, (double) MaxHeapFreeRatio);
1567 shrink(shrink_bytes);
1568 }
1569 }
1570
1571 HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
1572 AllocationContext_t context,
1573 bool do_gc,
1574 bool clear_all_soft_refs,
1575 bool expect_null_mutator_alloc_region,
1576 bool* gc_succeeded) {
1577 *gc_succeeded = true;
1578 // Let's attempt the allocation first.
1579 HeapWord* result =
1580 attempt_allocation_at_safepoint(word_size,
1581 context,
1582 expect_null_mutator_alloc_region);
1583 if (result != NULL) {
1584 assert(*gc_succeeded, "sanity");
1585 return result;
1586 }
1653
1654 // What else? We might try synchronous finalization later. If the total
1655 // space available is large enough for the allocation, then a more
1656 // complete compaction phase than we've tried so far might be
1657 // appropriate.
1658 assert(*succeeded, "sanity");
1659 return NULL;
1660 }
1661
1662 // Attempt to expand the heap sufficiently to support an allocation
1663 // of the given "word_size". If successful, perform the allocation
1664 // and return the address of the allocated block, or else return
1665 // "NULL".
1666
1667 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) {
1668 assert_at_safepoint(true /* should_be_vm_thread */);
1669
1670 verify_region_sets_optional();
1671
1672 size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1673 ergo_verbose1(ErgoHeapSizing,
1674 "attempt heap expansion",
1675 ergo_format_reason("allocation request failed")
1676 ergo_format_byte("allocation request"),
1677 word_size * HeapWordSize);
1678 if (expand(expand_bytes)) {
1679 _hrm.verify_optional();
1680 verify_region_sets_optional();
1681 return attempt_allocation_at_safepoint(word_size,
1682 context,
1683 false /* expect_null_mutator_alloc_region */);
1684 }
1685 return NULL;
1686 }
1687
1688 bool G1CollectedHeap::expand(size_t expand_bytes, double* expand_time_ms) {
1689 size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1690 aligned_expand_bytes = align_size_up(aligned_expand_bytes,
1691 HeapRegion::GrainBytes);
1692 ergo_verbose2(ErgoHeapSizing,
1693 "expand the heap",
1694 ergo_format_byte("requested expansion amount")
1695 ergo_format_byte("attempted expansion amount"),
1696 expand_bytes, aligned_expand_bytes);
1697
1698 if (is_maximal_no_gc()) {
1699 ergo_verbose0(ErgoHeapSizing,
1700 "did not expand the heap",
1701 ergo_format_reason("heap already fully expanded"));
1702 return false;
1703 }
1704
1705 double expand_heap_start_time_sec = os::elapsedTime();
1706 uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
1707 assert(regions_to_expand > 0, "Must expand by at least one region");
1708
1709 uint expanded_by = _hrm.expand_by(regions_to_expand);
1710 if (expand_time_ms != NULL) {
1711 *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
1712 }
1713
1714 if (expanded_by > 0) {
1715 size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
1716 assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1717 g1_policy()->record_new_heap_size(num_regions());
1718 } else {
1719 ergo_verbose0(ErgoHeapSizing,
1720 "did not expand the heap",
1721 ergo_format_reason("heap expansion operation failed"));
1722 // The expansion of the virtual storage space was unsuccessful.
1723 // Let's see if it was because we ran out of swap.
1724 if (G1ExitOnExpansionFailure &&
1725 _hrm.available() >= regions_to_expand) {
1726 // We had head room...
1727 vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
1728 }
1729 }
1730 return regions_to_expand > 0;
1731 }
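// Worked example of the rounding above, with illustrative numbers: if
// HeapRegion::GrainBytes is 8M and the page size is 4K, a request of
// expand_bytes == 5M is first page-aligned (still 5M) and then rounded up
// to the region granularity, so aligned_expand_bytes == 8M and
// regions_to_expand == 1; the heap grows by a full region even though less
// was asked for.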
1732
1733 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1734 size_t aligned_shrink_bytes =
1735 ReservedSpace::page_align_size_down(shrink_bytes);
1736 aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
1737 HeapRegion::GrainBytes);
1738 uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
1739
1740 uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
1741 size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
1742
1743 ergo_verbose3(ErgoHeapSizing,
1744 "shrink the heap",
1745 ergo_format_byte("requested shrinking amount")
1746 ergo_format_byte("aligned shrinking amount")
1747 ergo_format_byte("attempted shrinking amount"),
1748 shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
1749 if (num_regions_removed > 0) {
1750 g1_policy()->record_new_heap_size(num_regions());
1751 } else {
1752 ergo_verbose0(ErgoHeapSizing,
1753 "did not shrink the heap",
1754 ergo_format_reason("heap shrinking operation failed"));
1755 }
1756 }
1757
1758 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1759 verify_region_sets_optional();
1760
1761 // We should only reach here at the end of a Full GC which means we
1762   // should not be holding on to any GC alloc regions. The method
1763 // below will make sure of that and do any remaining clean up.
1764 _allocator->abandon_gc_alloc_regions();
1765
1766 // Instead of tearing down / rebuilding the free lists here, we
1767 // could instead use the remove_all_pending() method on free_list to
1768 // remove only the ones that we need to remove.
1769 tear_down_region_sets(true /* free_list_only */);
1770 shrink_helper(shrink_bytes);
1771 rebuild_region_sets(true /* free_list_only */);
1772
1773 _hrm.verify_optional();
1774 verify_region_sets_optional();
1846 // Initialize the G1EvacuationFailureALot counters and flags.
1847 NOT_PRODUCT(reset_evacuation_should_fail();)
1848
1849 guarantee(_task_queues != NULL, "task_queues allocation failure.");
1850 }
1851
1852 G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description,
1853 size_t size,
1854 size_t translation_factor) {
1855 size_t preferred_page_size = os::page_size_for_region_unaligned(size, 1);
1856 // Allocate a new reserved space, preferring to use large pages.
1857 ReservedSpace rs(size, preferred_page_size);
1858 G1RegionToSpaceMapper* result =
1859 G1RegionToSpaceMapper::create_mapper(rs,
1860 size,
1861 rs.alignment(),
1862 HeapRegion::GrainBytes,
1863 translation_factor,
1864 mtGC);
1865 if (TracePageSizes) {
1866 gclog_or_tty->print_cr("G1 '%s': pg_sz=" SIZE_FORMAT " base=" PTR_FORMAT " size=" SIZE_FORMAT " alignment=" SIZE_FORMAT " reqsize=" SIZE_FORMAT,
1867 description, preferred_page_size, p2i(rs.base()), rs.size(), rs.alignment(), size);
1868 }
1869 return result;
1870 }
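// Illustrative use of the factory above (a sketch only; the real calls in
// initialize() pass the sizes and translation factors of the actual
// auxiliary structures they back):
//
//   G1RegionToSpaceMapper* bitmap_storage =
//     create_aux_memory_mapper("Mark bitmap",              // label for TracePageSizes output
//                              bitmap_size_in_bytes,       // hypothetical size
//                              bitmap_translation_factor); // hypothetical: heap bytes covered per byte of bitmap
//
// The translation factor lets the mapper commit and uncommit the auxiliary
// space in step with the heap regions it describes.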
1871
1872 jint G1CollectedHeap::initialize() {
1873 CollectedHeap::pre_initialize();
1874 os::enable_vtime();
1875
1876 G1Log::init();
1877
1878 // Necessary to satisfy locking discipline assertions.
1879
1880 MutexLocker x(Heap_lock);
1881
1882 // We have to initialize the printer before committing the heap, as
1883 // it will be used then.
1884 _hr_printer.set_active(G1PrintHeapRegions);
1885
1886 // While there are no constraints in the GC code that HeapWordSize
1887 // be any particular value, there are multiple other areas in the
1888 // system which believe this to be true (e.g. oop->object_size in some
1889 // cases incorrectly returns the size in wordSize units rather than
1890 // HeapWordSize).
1891 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1892
1893 size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1894 size_t max_byte_size = collector_policy()->max_heap_byte_size();
1895 size_t heap_alignment = collector_policy()->heap_alignment();
1896
1897 // Ensure that the sizes are properly aligned.
1898 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1899 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1900 Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
1901
1902 _refine_cte_cl = new RefineCardTableEntryClosure();
1903
1904 jint ecode = JNI_OK;
1905 _cg1r = ConcurrentG1Refine::create(this, _refine_cte_cl, &ecode);
2058 G1AllocRegion::setup(this, dummy_region);
2059
2060 _allocator->init_mutator_alloc_region();
2061
2062   // Create the monitoring and management support now that the
2063   // values in the heap have been properly initialized.
2064 _g1mm = new G1MonitoringSupport(this);
2065
2066 G1StringDedup::initialize();
2067
2068 _preserved_objs = NEW_C_HEAP_ARRAY(OopAndMarkOopStack, ParallelGCThreads, mtGC);
2069 for (uint i = 0; i < ParallelGCThreads; i++) {
2070 new (&_preserved_objs[i]) OopAndMarkOopStack();
2071 }
2072
2073 return JNI_OK;
2074 }
2075
2076 void G1CollectedHeap::stop() {
2077 // Stop all concurrent threads. We do this to make sure these threads
2078 // do not continue to execute and access resources (e.g. gclog_or_tty)
2079 // that are destroyed during shutdown.
2080 _cg1r->stop();
2081 _cmThread->stop();
2082 if (G1StringDedup::is_enabled()) {
2083 G1StringDedup::stop();
2084 }
2085 }
2086
2087 size_t G1CollectedHeap::conservative_max_heap_alignment() {
2088 return HeapRegion::max_region_size();
2089 }
2090
2091 void G1CollectedHeap::post_initialize() {
2092 CollectedHeap::post_initialize();
2093 ref_processing_init();
2094 }
2095
2096 void G1CollectedHeap::ref_processing_init() {
2097 // Reference processing in G1 currently works as follows:
2098 //
2175 }
2176
2177 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
2178 hr->reset_gc_time_stamp();
2179 }
2180
2181 #ifndef PRODUCT
2182
2183 class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
2184 private:
2185 unsigned _gc_time_stamp;
2186 bool _failures;
2187
2188 public:
2189 CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
2190 _gc_time_stamp(gc_time_stamp), _failures(false) { }
2191
2192 virtual bool doHeapRegion(HeapRegion* hr) {
2193 unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
2194 if (_gc_time_stamp != region_gc_time_stamp) {
2195 gclog_or_tty->print_cr("Region " HR_FORMAT " has GC time stamp = %d, "
2196 "expected %d", HR_FORMAT_PARAMS(hr),
2197 region_gc_time_stamp, _gc_time_stamp);
2198 _failures = true;
2199 }
2200 return false;
2201 }
2202
2203 bool failures() { return _failures; }
2204 };
2205
2206 void G1CollectedHeap::check_gc_time_stamps() {
2207 CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
2208 heap_region_iterate(&cl);
2209 guarantee(!cl.failures(), "all GC time stamps should have been reset");
2210 }
2211 #endif // PRODUCT
2212
2213 void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
2214 _cg1r->hot_card_cache()->drain(cl, worker_i);
2215 }
2216
2760 private:
2761 G1CollectedHeap* _g1h;
2762 VerifyOption _vo;
2763 bool _failures;
2764 public:
2765 // _vo == UsePrevMarking -> use "prev" marking information,
2766 // _vo == UseNextMarking -> use "next" marking information,
2767 // _vo == UseMarkWord -> use mark word from object header.
2768 VerifyRootsClosure(VerifyOption vo) :
2769 _g1h(G1CollectedHeap::heap()),
2770 _vo(vo),
2771 _failures(false) { }
2772
2773 bool failures() { return _failures; }
2774
2775 template <class T> void do_oop_nv(T* p) {
2776 T heap_oop = oopDesc::load_heap_oop(p);
2777 if (!oopDesc::is_null(heap_oop)) {
2778 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2779 if (_g1h->is_obj_dead_cond(obj, _vo)) {
2780 gclog_or_tty->print_cr("Root location " PTR_FORMAT " "
2781 "points to dead obj " PTR_FORMAT, p2i(p), p2i(obj));
2782 if (_vo == VerifyOption_G1UseMarkWord) {
2783 gclog_or_tty->print_cr(" Mark word: " INTPTR_FORMAT, (intptr_t)obj->mark());
2784 }
2785 obj->print_on(gclog_or_tty);
2786 _failures = true;
2787 }
2788 }
2789 }
2790
2791 void do_oop(oop* p) { do_oop_nv(p); }
2792 void do_oop(narrowOop* p) { do_oop_nv(p); }
2793 };
2794
2795 class G1VerifyCodeRootOopClosure: public OopClosure {
2796 G1CollectedHeap* _g1h;
2797 OopClosure* _root_cl;
2798 nmethod* _nm;
2799 VerifyOption _vo;
2800 bool _failures;
2801
2802 template <class T> void do_oop_work(T* p) {
2803 // First verify that this root is live
2804 _root_cl->do_oop(p);
2805
2810
2811 // Don't check the code roots during marking verification in a full GC
2812 if (_vo == VerifyOption_G1UseMarkWord) {
2813 return;
2814 }
2815
2816 // Now verify that the current nmethod (which contains p) is
2817 // in the code root list of the heap region containing the
2818 // object referenced by p.
2819
2820 T heap_oop = oopDesc::load_heap_oop(p);
2821 if (!oopDesc::is_null(heap_oop)) {
2822 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2823
2824 // Now fetch the region containing the object
2825 HeapRegion* hr = _g1h->heap_region_containing(obj);
2826 HeapRegionRemSet* hrrs = hr->rem_set();
2827 // Verify that the strong code root list for this region
2828 // contains the nmethod
2829 if (!hrrs->strong_code_roots_list_contains(_nm)) {
2830 gclog_or_tty->print_cr("Code root location " PTR_FORMAT " "
2831 "from nmethod " PTR_FORMAT " not in strong "
2832 "code roots for region [" PTR_FORMAT "," PTR_FORMAT ")",
2833 p2i(p), p2i(_nm), p2i(hr->bottom()), p2i(hr->end()));
2834 _failures = true;
2835 }
2836 }
2837 }
2838
2839 public:
2840 G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo):
2841 _g1h(g1h), _root_cl(root_cl), _vo(vo), _nm(NULL), _failures(false) {}
2842
2843 void do_oop(oop* p) { do_oop_work(p); }
2844 void do_oop(narrowOop* p) { do_oop_work(p); }
2845
2846 void set_nmethod(nmethod* nm) { _nm = nm; }
2847 bool failures() { return _failures; }
2848 };
2849
2850 class G1VerifyCodeRootBlobClosure: public CodeBlobClosure {
2991 }
2992
2993 bool doHeapRegion(HeapRegion* r) {
2994 // For archive regions, verify there are no heap pointers to
2995 // non-pinned regions. For all others, verify liveness info.
2996 if (r->is_archive()) {
2997 VerifyArchiveRegionClosure verify_oop_pointers(r);
2998 r->object_iterate(&verify_oop_pointers);
2999 return true;
3000 }
3001 if (!r->is_continues_humongous()) {
3002 bool failures = false;
3003 r->verify(_vo, &failures);
3004 if (failures) {
3005 _failures = true;
3006 } else if (!r->is_starts_humongous()) {
3007 VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
3008       r->object_iterate(&not_dead_yet_cl);
3009 if (_vo != VerifyOption_G1UseNextMarking) {
3010 if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
3011 gclog_or_tty->print_cr("[" PTR_FORMAT "," PTR_FORMAT "] "
3012 "max_live_bytes " SIZE_FORMAT " "
3013 "< calculated " SIZE_FORMAT,
3014 p2i(r->bottom()), p2i(r->end()),
3015 r->max_live_bytes(),
3016 not_dead_yet_cl.live_bytes());
3017 _failures = true;
3018 }
3019 } else {
3020 // When vo == UseNextMarking we cannot currently do a sanity
3021 // check on the live bytes as the calculation has not been
3022 // finalized yet.
3023 }
3024 }
3025 }
3026     return false; // keep iterating even after a failure; failures are recorded in _failures
3027 }
3028 };
3029
3030 // This is the task used for parallel verification of the heap regions
3031
3032 class G1ParVerifyTask: public AbstractGangTask {
3033 private:
3034 G1CollectedHeap* _g1h;
3035 VerifyOption _vo;
3036 bool _failures;
3044 AbstractGangTask("Parallel verify task"),
3045 _g1h(g1h),
3046 _vo(vo),
3047 _failures(false),
3048 _hrclaimer(g1h->workers()->active_workers()) {}
3049
3050 bool failures() {
3051 return _failures;
3052 }
3053
3054 void work(uint worker_id) {
3055 HandleMark hm;
3056 VerifyRegionClosure blk(true, _vo);
3057 _g1h->heap_region_par_iterate(&blk, worker_id, &_hrclaimer);
3058 if (blk.failures()) {
3059 _failures = true;
3060 }
3061 }
3062 };
3063
3064 void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
3065 if (SafepointSynchronize::is_at_safepoint()) {
3066 assert(Thread::current()->is_VM_thread(),
3067 "Expected to be executed serially by the VM thread at this point");
3068
3069 if (!silent) { gclog_or_tty->print("Roots "); }
3070 VerifyRootsClosure rootsCl(vo);
3071 VerifyKlassClosure klassCl(this, &rootsCl);
3072 CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);
3073
3074 // We apply the relevant closures to all the oops in the
3075 // system dictionary, class loader data graph, the string table
3076 // and the nmethods in the code cache.
3077 G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
3078 G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
3079
3080 {
3081 G1RootProcessor root_processor(this, 1);
3082 root_processor.process_all_roots(&rootsCl,
3083 &cldCl,
3084 &blobsCl);
3085 }
3086
3087 bool failures = rootsCl.failures() || codeRootsCl.failures();
3088
3089 if (vo != VerifyOption_G1UseMarkWord) {
3090 // If we're verifying during a full GC then the region sets
3091 // will have been torn down at the start of the GC. Therefore
3092 // verifying the region sets will fail. So we only verify
3093 // the region sets when not in a full GC.
3094 if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
3095 verify_region_sets();
3096 }
3097
3098 if (!silent) { gclog_or_tty->print("HeapRegions "); }
3099 if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
3100
3101 G1ParVerifyTask task(this, vo);
3102 workers()->run_task(&task);
3103 if (task.failures()) {
3104 failures = true;
3105 }
3106
3107 } else {
3108 VerifyRegionClosure blk(false, vo);
3109 heap_region_iterate(&blk);
3110 if (blk.failures()) {
3111 failures = true;
3112 }
3113 }
3114
3115 if (G1StringDedup::is_enabled()) {
3116 if (!silent) gclog_or_tty->print("StrDedup ");
3117 G1StringDedup::verify();
3118 }
3119
3120 if (failures) {
3121 gclog_or_tty->print_cr("Heap:");
3122 // It helps to have the per-region information in the output to
3123 // help us track down what went wrong. This is why we call
3124 // print_extended_on() instead of print_on().
3125 print_extended_on(gclog_or_tty);
3126 gclog_or_tty->cr();
3127 gclog_or_tty->flush();
3128 }
3129 guarantee(!failures, "there should not have been any failures");
3130 } else {
3131 if (!silent) {
3132 gclog_or_tty->print("(SKIPPING Roots, HeapRegionSets, HeapRegions, RemSet");
3133 if (G1StringDedup::is_enabled()) {
3134 gclog_or_tty->print(", StrDedup");
3135 }
3136 gclog_or_tty->print(") ");
3137 }
3138 }
3139 }
3140
3141 void G1CollectedHeap::verify(bool silent) {
3142 verify(silent, VerifyOption_G1UsePrevMarking);
3143 }
3144
3145 double G1CollectedHeap::verify(bool guard, const char* msg) {
3146 double verify_time_ms = 0.0;
3147
3148 if (guard && total_collections() >= VerifyGCStartAt) {
3149 double verify_start = os::elapsedTime();
3150 HandleMark hm; // Discard invalid handles created during verification
3151 prepare_for_verify();
3152 Universe::verify(VerifyOption_G1UsePrevMarking, msg);
3153 verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
3154 }
3155
3156 return verify_time_ms;
3157 }
3158
3159 void G1CollectedHeap::verify_before_gc() {
3160 double verify_time_ms = verify(VerifyBeforeGC, " VerifyBeforeGC:");
3161 g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
3162 }
3163
3164 void G1CollectedHeap::verify_after_gc() {
3165 double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:");
3166 g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
3167 }
3168
3169 class PrintRegionClosure: public HeapRegionClosure {
3170 outputStream* _st;
3171 public:
3172 PrintRegionClosure(outputStream* st) : _st(st) {}
3173 bool doHeapRegion(HeapRegion* r) {
3174 r->print_on(_st);
3175 return false;
3176 }
3177 };
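// A PrintRegionClosure like the one above is driven the same way as the
// other region closures in this file (illustrative; the actual caller is
// one of the print routines not shown in this excerpt):
//
//   PrintRegionClosure cl(gclog_or_tty);
//   heap_region_iterate(&cl);   // visits every region, printing each one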
3178
3179 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
3180 const HeapRegion* hr,
3181 const VerifyOption vo) const {
3182 switch (vo) {
3183 case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
3184 case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
3185 case VerifyOption_G1UseMarkWord: return !obj->is_gc_marked() && !hr->is_archive();
3255 G1StringDedup::print_worker_threads_on(st);
3256 }
3257 }
3258
3259 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
3260 workers()->threads_do(tc);
3261 tc->do_thread(_cmThread);
3262 _cg1r->threads_do(tc);
3263 if (G1StringDedup::is_enabled()) {
3264 G1StringDedup::threads_do(tc);
3265 }
3266 }
3267
3268 void G1CollectedHeap::print_tracing_info() const {
3269 // We'll overload this to mean "trace GC pause statistics."
3270 if (TraceYoungGenTime || TraceOldGenTime) {
3271 // The "G1CollectorPolicy" is keeping track of these stats, so delegate
3272 // to that.
3273 g1_policy()->print_tracing_info();
3274 }
3275 if (G1SummarizeRSetStats) {
3276 g1_rem_set()->print_summary_info();
3277 }
3278 if (G1SummarizeConcMark) {
3279 concurrent_mark()->print_summary_info();
3280 }
3281 g1_policy()->print_yg_surv_rate_info();
3282 }
3283
3284 #ifndef PRODUCT
3285 // Helpful for debugging RSet issues.
3286
3287 class PrintRSetsClosure : public HeapRegionClosure {
3288 private:
3289 const char* _msg;
3290 size_t _occupied_sum;
3291
3292 public:
3293 bool doHeapRegion(HeapRegion* r) {
3294 HeapRegionRemSet* hrrs = r->rem_set();
3295 size_t occupied = hrrs->occupied();
3296 _occupied_sum += occupied;
3297
3298 gclog_or_tty->print_cr("Printing RSet for region " HR_FORMAT,
3299 HR_FORMAT_PARAMS(r));
3300 if (occupied == 0) {
3301 gclog_or_tty->print_cr(" RSet is empty");
3302 } else {
3303 hrrs->print();
3304 }
3305 gclog_or_tty->print_cr("----------");
3306 return false;
3307 }
3308
3309 PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
3310 gclog_or_tty->cr();
3311 gclog_or_tty->print_cr("========================================");
3312 gclog_or_tty->print_cr("%s", msg);
3313 gclog_or_tty->cr();
3314 }
3315
3316 ~PrintRSetsClosure() {
3317 gclog_or_tty->print_cr("Occupied Sum: " SIZE_FORMAT, _occupied_sum);
3318 gclog_or_tty->print_cr("========================================");
3319 gclog_or_tty->cr();
3320 }
3321 };
3322
3323 void G1CollectedHeap::print_cset_rsets() {
3324 PrintRSetsClosure cl("Printing CSet RSets");
3325 collection_set_iterate(&cl);
3326 }
3327
3328 void G1CollectedHeap::print_all_rsets() {
3329   PrintRSetsClosure cl("Printing All RSets");
3330 heap_region_iterate(&cl);
3331 }
3332 #endif // PRODUCT
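// The two RSet printers above can be invoked by hand while debugging,
// e.g. from a debugger prompt (illustrative gdb syntax):
//
//   (gdb) call G1CollectedHeap::heap()->print_all_rsets()
//   (gdb) call G1CollectedHeap::heap()->print_cset_rsets()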
3333
3334 G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
3335 YoungList* young_list = heap()->young_list();
3336
3337 size_t eden_used_bytes = young_list->eden_used_bytes();
3338 size_t survivor_used_bytes = young_list->survivor_used_bytes();
3339
3357
3358 const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
3359 gc_tracer->report_metaspace_summary(when, metaspace_summary);
3360 }
3361
3362
3363 G1CollectedHeap* G1CollectedHeap::heap() {
3364 CollectedHeap* heap = Universe::heap();
3365 assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
3366 assert(heap->kind() == CollectedHeap::G1CollectedHeap, "Not a G1CollectedHeap");
3367 return (G1CollectedHeap*)heap;
3368 }
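// Typical use of the accessor above, as seen throughout the GC code
// (illustrative):
//
//   G1CollectedHeap* g1h = G1CollectedHeap::heap();
//   size_t cap = g1h->capacity();   // any G1CollectedHeap API from here on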
3369
3370 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
3371 // always_do_update_barrier = false;
3372 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
3373 // Fill TLAB's and such
3374 accumulate_statistics_all_tlabs();
3375 ensure_parsability(true);
3376
3377 if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) &&
3378 (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
3379 g1_rem_set()->print_periodic_summary_info("Before GC RS summary");
3380 }
3381 }
3382
3383 void G1CollectedHeap::gc_epilogue(bool full) {
3384
3385 if (G1SummarizeRSetStats &&
3386 (G1SummarizeRSetStatsPeriod > 0) &&
3387 // we are at the end of the GC. Total collections has already been increased.
3388 ((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) {
3389 g1_rem_set()->print_periodic_summary_info("After GC RS summary");
3390 }
3391
3392 // FIXME: what is this about?
3393 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
3394 // is set.
3395 #if defined(COMPILER2) || INCLUDE_JVMCI
3396 assert(DerivedPointerTable::is_empty(), "derived pointer present");
3397 #endif
3398 // always_do_update_barrier = true;
3399
3400 resize_all_tlabs();
3401 allocation_context_stats().update(full);
3402
3403 // We have just completed a GC. Update the soft reference
3404 // policy with the new heap occupancy
3405 Universe::update_heap_info_at_gc();
3406 }
3407
3408 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
3409 uint gc_count_before,
3410 bool* succeeded,
3613 guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification");
3614
3615 // Here's a good place to add any other checks we'd like to
3616 // perform on CSet regions.
3617 return false;
3618 }
3619 };
3620 #endif // ASSERT
3621
3622 uint G1CollectedHeap::num_task_queues() const {
3623 return _task_queues->size();
3624 }
3625
3626 #if TASKQUEUE_STATS
3627 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
3628 st->print_raw_cr("GC Task Stats");
3629 st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
3630 st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
3631 }
3632
3633 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const {
3634 print_taskqueue_stats_hdr(st);
3635
3636 TaskQueueStats totals;
3637 const uint n = num_task_queues();
3638 for (uint i = 0; i < n; ++i) {
3639 st->print("%3u ", i); task_queue(i)->stats.print(st); st->cr();
3640 totals += task_queue(i)->stats;
3641 }
3642 st->print_raw("tot "); totals.print(st); st->cr();
3643
3644 DEBUG_ONLY(totals.verify());
3645 }
3646
3647 void G1CollectedHeap::reset_taskqueue_stats() {
3648 const uint n = num_task_queues();
3649 for (uint i = 0; i < n; ++i) {
3650 task_queue(i)->stats.reset();
3651 }
3652 }
3653 #endif // TASKQUEUE_STATS
3654
3655 void G1CollectedHeap::log_gc_header() {
3656 if (!G1Log::fine()) {
3657 return;
3658 }
3659
3660 gclog_or_tty->gclog_stamp();
3661
3662 GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
3663 .append(collector_state()->gcs_are_young() ? "(young)" : "(mixed)")
3664 .append(collector_state()->during_initial_mark_pause() ? " (initial-mark)" : "");
3665
3666 gclog_or_tty->print("[%s", (const char*)gc_cause_str);
3667 }
3668
3669 void G1CollectedHeap::log_gc_footer(double pause_time_sec) {
3670 if (!G1Log::fine()) {
3671 return;
3672 }
3673
3674 if (G1Log::finer()) {
3675 if (evacuation_failed()) {
3676 gclog_or_tty->print(" (to-space exhausted)");
3677 }
3678 gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
3679 g1_policy()->print_phases(pause_time_sec);
3680 g1_policy()->print_detailed_heap_transition();
3681 } else {
3682 if (evacuation_failed()) {
3683 gclog_or_tty->print("--");
3684 }
3685 g1_policy()->print_heap_transition();
3686 gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
3687 }
3688 gclog_or_tty->flush();
3689 }
3690
3691 void G1CollectedHeap::wait_for_root_region_scanning() {
3692 double scan_wait_start = os::elapsedTime();
3693 // We have to wait until the CM threads finish scanning the
3694 // root regions as it's the only way to ensure that all the
3695 // objects on them have been correctly scanned before we start
3696 // moving them during the GC.
3697 bool waited = _cm->root_regions()->wait_until_scan_finished();
3698 double wait_time_ms = 0.0;
3699 if (waited) {
3700 double scan_wait_end = os::elapsedTime();
3701 wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
3702 }
3703 g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
3704 }
3705
3706 bool
3707 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3708 assert_at_safepoint(true /* should_be_vm_thread */);
3709 guarantee(!is_gc_active(), "collection is not reentrant");
3710
3711 if (GC_locker::check_active_before_gc()) {
3712 return false;
3713 }
3714
3715 _gc_timer_stw->register_gc_start();
3716
3717 GCIdMark gc_id_mark;
3718 _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3719
3720 SvcGCMarker sgcm(SvcGCMarker::MINOR);
3721 ResourceMark rm;
3722
3723 wait_for_root_region_scanning();
3724
3725 G1Log::update_level();
3726 print_heap_before_gc();
3727 trace_heap_before_gc(_gc_tracer_stw);
3728
3729 verify_region_sets_optional();
3730 verify_dirty_young_regions();
3731
3732 // This call will decide whether this pause is an initial-mark
3733 // pause. If it is, during_initial_mark_pause() will return true
3734 // for the duration of this pause.
3735 g1_policy()->decide_on_conc_mark_initiation();
3736
3737 // We do not allow initial-mark to be piggy-backed on a mixed GC.
3738 assert(!collector_state()->during_initial_mark_pause() ||
3739 collector_state()->gcs_are_young(), "sanity");
3740
3741 // We also do not allow mixed GCs during marking.
3742 assert(!collector_state()->mark_in_progress() || collector_state()->gcs_are_young(), "sanity");
3743
3744 // Record whether this pause is an initial mark. When the current
3745 // thread has completed its logging output and it's safe to signal
3746 // the CM thread, the flag's value in the policy has been reset.
3747 bool should_start_conc_mark = collector_state()->during_initial_mark_pause();
3748
3749 // Inner scope for scope based logging, timers, and stats collection
3750 {
3751 EvacuationInfo evacuation_info;
3752
3753 if (collector_state()->during_initial_mark_pause()) {
3754 // We are about to start a marking cycle, so we increment the
3755 // full collection counter.
3756 increment_old_marking_cycles_started();
3757 register_concurrent_cycle_start(_gc_timer_stw->gc_start());
3758 }
3759
3760 _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
3761
3762 TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
3763
3764 uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
3765 workers()->active_workers(),
3766 Threads::number_of_non_daemon_threads());
3767 workers()->set_active_workers(active_workers);
3768
3769 double pause_start_sec = os::elapsedTime();
3770 g1_policy()->note_gc_start(active_workers);
3771 log_gc_header();
3772
3773 TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3774 TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3775
3776 // If the secondary_free_list is not empty, append it to the
3777 // free_list. No need to wait for the cleanup operation to finish;
3778 // the region allocation code will check the secondary_free_list
3779 // and wait if necessary. If the G1StressConcRegionFreeing flag is
3780 // set, skip this step so that the region allocation code has to
3781 // get entries from the secondary_free_list.
3782 if (!G1StressConcRegionFreeing) {
3783 append_secondary_free_list_if_not_empty_with_lock();
3784 }
3785
3786 assert(check_young_list_well_formed(), "young list should be well formed");
3787
3788 // Don't dynamically change the number of GC threads this early. A value of
3789 // 0 is used to indicate serial work. When parallel work is done,
3790 // it will be set.
3791
3805 #endif
3806
3807 // Please see comment in g1CollectedHeap.hpp and
3808 // G1CollectedHeap::ref_processing_init() to see how
3809 // reference processing currently works in G1.
3810
3811 // Enable discovery in the STW reference processor
3812 ref_processor_stw()->enable_discovery();
3813
3814 {
3815 // We want to temporarily turn off discovery by the
3816 // CM ref processor, if necessary, and turn it back on
3817 // on again later if we do. Using a scoped
3818 // NoRefDiscovery object will do this.
3819 NoRefDiscovery no_cm_discovery(ref_processor_cm());
3820
3821 // Forget the current alloc region (we might even choose it to be part
3822 // of the collection set!).
3823 _allocator->release_mutator_alloc_region();
3824
3825 // We should call this after we retire the mutator alloc
3826 // region(s) so that all the ALLOC / RETIRE events are generated
3827 // before the start GC event.
3828 _hr_printer.start_gc(false /* full */, (size_t) total_collections());
3829
3830 // This timing is only used by the ergonomics to handle our pause target.
3831 // It is unclear why this should not include the full pause. We will
3832 // investigate this in CR 7178365.
3833 //
3834 // Preserving the old comment here if that helps the investigation:
3835 //
3836 // The elapsed time induced by the start time below deliberately elides
3837 // the possible verification above.
3838 double sample_start_time_sec = os::elapsedTime();
3839
3840 g1_policy()->record_collection_pause_start(sample_start_time_sec);
3841
3842 if (collector_state()->during_initial_mark_pause()) {
3843 concurrent_mark()->checkpointRootsInitialPre();
3844 }
3845
3846 double time_remaining_ms = g1_policy()->finalize_young_cset_part(target_pause_time_ms);
3847 g1_policy()->finalize_old_cset_part(time_remaining_ms);
3848
3849 evacuation_info.set_collectionset_regions(g1_policy()->cset_region_length());
3926
3927 if (collector_state()->during_initial_mark_pause()) {
3928 // We have to do this before we notify the CM threads that
3929 // they can start working to make sure that all the
3930 // appropriate initialization is done on the CM object.
3931 concurrent_mark()->checkpointRootsInitialPost();
3932 collector_state()->set_mark_in_progress(true);
3933 // Note that we don't actually trigger the CM thread at
3934 // this point. We do that later when we're sure that
3935 // the current thread has completed its logging output.
3936 }
3937
3938 allocate_dummy_regions();
3939
3940 _allocator->init_mutator_alloc_region();
3941
3942 {
3943 size_t expand_bytes = g1_policy()->expansion_amount();
3944 if (expand_bytes > 0) {
3945 size_t bytes_before = capacity();
3946 // No need for an ergo verbose message here,
3947 // expansion_amount() does this when it returns a value > 0.
3948 double expand_ms;
3949 if (!expand(expand_bytes, &expand_ms)) {
3950 // We failed to expand the heap. Cannot do anything about it.
3951 }
3952 g1_policy()->phase_times()->record_expand_heap_time(expand_ms);
3953 }
3954 }
3955
3956 // We redo the verification, but now with respect to the new CSet, which
3957 // has just been initialized after the previous CSet was freed.
3958 _cm->verify_no_cset_oops();
3959 _cm->note_end_of_gc();
3960
3961 // This timing is only used by the ergonomics to handle our pause target.
3962 // It is unclear why this should not include the full pause. We will
3963 // investigate this in CR 7178365.
3964 double sample_end_time_sec = os::elapsedTime();
3965 double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
3966 size_t total_cards_scanned = per_thread_states.total_cards_scanned();
3986 // stamp here we invalidate all the GC time stamps on all the
3987 // regions and saved_mark_word() will simply return top() for
3988 // all the regions. This is a nicer way of ensuring this rather
3989 // than iterating over the regions and fixing them. In fact, the
3990 // GC time stamp increment here also ensures that
3991 // saved_mark_word() will return top() between pauses, i.e.,
3992 // during concurrent refinement. So we don't need the
3993 // is_gc_active() check to decide which top to use when
3994 // scanning cards (see CR 7039627).
3995 increment_gc_time_stamp();
3996
3997 verify_after_gc();
3998 check_bitmaps("GC End");
3999
4000 assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
4001 ref_processor_stw()->verify_no_references_recorded();
4002
4003 // CM reference discovery will be re-enabled if necessary.
4004 }
4005
4006 // We should do this after we potentially expand the heap so
4007 // that all the COMMIT events are generated before the end GC
4008 // event, and after we retire the GC alloc regions so that all
4009 // RETIRE events are generated before the end GC event.
4010 _hr_printer.end_gc(false /* full */, (size_t) total_collections());
4011
4012 #ifdef TRACESPINNING
4013 ParallelTaskTerminator::print_termination_counts();
4014 #endif
4015
4016 gc_epilogue(false);
4017 }
4018
4019 // Print the remainder of the GC log output.
4020 log_gc_footer(os::elapsedTime() - pause_start_sec);
4021
4022 // It is not yet safe to tell the concurrent mark thread to
4023 // start, as we have some optional output below. We don't want the
4024 // output from the concurrent mark thread interfering with this
4025 // logging output either.
4026
4027 _hrm.verify_optional();
4028 verify_region_sets_optional();
4029
4030 TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) print_taskqueue_stats());
4031 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
4032
4033 print_heap_after_gc();
4034 trace_heap_after_gc(_gc_tracer_stw);
4035
4036 // We must call G1MonitoringSupport::update_sizes() in the same scoping level
4037 // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
4038 // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
4039 // before any GC notifications are raised.
4040 g1mm()->update_sizes();
4041
4042 _gc_tracer_stw->report_evacuation_info(&evacuation_info);
4043 _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
4044 _gc_timer_stw->register_gc_end();
4045 _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
4046 }
4047 // It should now be safe to tell the concurrent mark thread to start
4048 // without its logging output interfering with the logging output
4049 // that came from the pause.
4050
4201
4202 double strong_roots_sec = os::elapsedTime() - start_strong_roots_sec;
4203
4204 double term_sec = 0.0;
4205 size_t evac_term_attempts = 0;
4206 {
4207 double start = os::elapsedTime();
4208 G1ParEvacuateFollowersClosure evac(_g1h, pss, _queues, &_terminator);
4209 evac.do_void();
4210
4211 evac_term_attempts = evac.term_attempts();
4212 term_sec = evac.term_time();
4213 double elapsed_sec = os::elapsedTime() - start;
4214 _g1h->g1_policy()->phase_times()->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
4215 _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
4216 _g1h->g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, evac_term_attempts);
4217 }
4218
4219 assert(pss->queue_is_empty(), "should be empty");
4220
4221 if (PrintTerminationStats) {
4222 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
4223 size_t lab_waste;
4224 size_t lab_undo_waste;
4225 pss->waste(lab_waste, lab_undo_waste);
4226 _g1h->print_termination_stats(gclog_or_tty,
4227 worker_id,
4228 (os::elapsedTime() - start_sec) * 1000.0, /* elapsed time */
4229 strong_roots_sec * 1000.0, /* strong roots time */
4230 term_sec * 1000.0, /* evac term time */
4231 evac_term_attempts, /* evac term attempts */
4232 lab_waste, /* alloc buffer waste */
4233 lab_undo_waste /* undo waste */
4234 );
4235 }
4236
4237 // Close the inner scope so that the ResourceMark and HandleMark
4238 // destructors are executed here and are included as part of the
4239 // "GC Worker Time".
4240 }
4241 _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime());
4242 }
4243 };
4244
4245 void G1CollectedHeap::print_termination_stats_hdr(outputStream* const st) {
4246 st->print_raw_cr("GC Termination Stats");
4247 st->print_raw_cr(" elapsed --strong roots-- -------termination------- ------waste (KiB)------");
4248 st->print_raw_cr("thr ms ms % ms % attempts total alloc undo");
4249 st->print_raw_cr("--- --------- --------- ------ --------- ------ -------- ------- ------- -------");
4250 }
4251
4252 void G1CollectedHeap::print_termination_stats(outputStream* const st,
4253 uint worker_id,
4254 double elapsed_ms,
4255 double strong_roots_ms,
4256 double term_ms,
4257 size_t term_attempts,
4258 size_t alloc_buffer_waste,
4259 size_t undo_waste) const {
4260 st->print_cr("%3d %9.2f %9.2f %6.2f "
4261 "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
4262 SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
4263 worker_id, elapsed_ms, strong_roots_ms, strong_roots_ms * 100 / elapsed_ms,
4264 term_ms, term_ms * 100 / elapsed_ms, term_attempts,
4265 (alloc_buffer_waste + undo_waste) * HeapWordSize / K,
4266 alloc_buffer_waste * HeapWordSize / K,
4267 undo_waste * HeapWordSize / K);
4268 }
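// For orientation, a row of the per-worker termination table printed by the
// two methods above might look roughly as follows (the values are purely
// illustrative, not actual output; column widths follow the format string):
//
//   thr     ms        ms      %        ms      %  attempts   total   alloc    undo
//     0    123.45     98.76  80.01      1.23   1.00       17     128      96      32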
4269
4270 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
4271 private:
4272 BoolObjectClosure* _is_alive;
4273 int _initial_string_table_size;
4274 int _initial_symbol_table_size;
4275
4276 bool _process_strings;
4277 int _strings_processed;
4278 int _strings_removed;
4279
4280 bool _process_symbols;
4289 _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
4290
4291 _initial_string_table_size = StringTable::the_table()->table_size();
4292 _initial_symbol_table_size = SymbolTable::the_table()->table_size();
4293 if (process_strings) {
4294 StringTable::clear_parallel_claimed_index();
4295 }
4296 if (process_symbols) {
4297 SymbolTable::clear_parallel_claimed_index();
4298 }
4299 }
4300
4301 ~G1StringSymbolTableUnlinkTask() {
4302 guarantee(!_process_strings || StringTable::parallel_claimed_index() >= _initial_string_table_size,
4303 "claim value %d after unlink less than initial string table size %d",
4304 StringTable::parallel_claimed_index(), _initial_string_table_size);
4305 guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
4306 "claim value %d after unlink less than initial symbol table size %d",
4307 SymbolTable::parallel_claimed_index(), _initial_symbol_table_size);
4308
4309 if (G1TraceStringSymbolTableScrubbing) {
4310 gclog_or_tty->print_cr("Cleaned string and symbol table, "
4311 "strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, "
4312 "symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",
4313 strings_processed(), strings_removed(),
4314 symbols_processed(), symbols_removed());
4315 }
4316 }
4317
4318 void work(uint worker_id) {
4319 int strings_processed = 0;
4320 int strings_removed = 0;
4321 int symbols_processed = 0;
4322 int symbols_removed = 0;
4323 if (_process_strings) {
4324 StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
4325 Atomic::add(strings_processed, &_strings_processed);
4326 Atomic::add(strings_removed, &_strings_removed);
4327 }
4328 if (_process_symbols) {
4329 SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
4330 Atomic::add(symbols_processed, &_symbols_processed);
4331 Atomic::add(symbols_removed, &_symbols_removed);
4332 }
4333 }
4334
4335 size_t strings_processed() const { return (size_t)_strings_processed; }
4336 size_t strings_removed() const { return (size_t)_strings_removed; }
5123
5124 void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
5125 g1_rem_set()->prepare_for_oops_into_collection_set_do();
5126
5127 // Should G1EvacuationFailureALot be in effect for this GC?
5128 NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
5129
5130 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5131 double start_par_time_sec = os::elapsedTime();
5132 double end_par_time_sec;
5133
5134 {
5135 const uint n_workers = workers()->active_workers();
5136 G1RootProcessor root_processor(this, n_workers);
5137 G1ParTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, n_workers);
5138 // InitialMark needs claim bits to keep track of the marked-through CLDs.
5139 if (collector_state()->during_initial_mark_pause()) {
5140 ClassLoaderDataGraph::clear_claimed_marks();
5141 }
5142
5143 // The individual threads will set their evac-failure closures.
5144 if (PrintTerminationStats) {
5145 print_termination_stats_hdr(gclog_or_tty);
5146 }
5147
5148 workers()->run_task(&g1_par_task);
5149 end_par_time_sec = os::elapsedTime();
5150
5151 // Closing the inner scope will execute the destructor
5152 // for the G1RootProcessor object. We record the current
5153 // elapsed time before closing the scope so that time
5154 // taken for the destructor is NOT included in the
5155 // reported parallel time.
5156 }
5157
5158 G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
5159
5160 double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
5161 phase_times->record_par_time(par_time_ms);
5162
5163 double code_root_fixup_time_ms =
5164 (os::elapsedTime() - end_par_time_sec) * 1000.0;
5165 phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
5166
5351 }
5352 }
5353
5354 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
5355 G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
5356 for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
5357 verify_dirty_region(hr);
5358 }
5359 }
5360
5361 void G1CollectedHeap::verify_dirty_young_regions() {
5362 verify_dirty_young_list(_young_list->first_region());
5363 }
5364
5365 bool G1CollectedHeap::verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
5366 HeapWord* tams, HeapWord* end) {
5367 guarantee(tams <= end,
5368 "tams: " PTR_FORMAT " end: " PTR_FORMAT, p2i(tams), p2i(end));
5369 HeapWord* result = bitmap->getNextMarkedWordAddress(tams, end);
5370 if (result < end) {
5371 gclog_or_tty->cr();
5372 gclog_or_tty->print_cr("## wrong marked address on %s bitmap: " PTR_FORMAT,
5373 bitmap_name, p2i(result));
5374 gclog_or_tty->print_cr("## %s tams: " PTR_FORMAT " end: " PTR_FORMAT,
5375 bitmap_name, p2i(tams), p2i(end));
5376 return false;
5377 }
5378 return true;
5379 }
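// Sketch of the invariant checked above (illustrative layout of a region):
//
//   bottom                  tams                     end
//     |-----------------------|-----------------------|
//      mark bits allowed here   no mark bits expected
//
// verify_no_bits_over_tams() reports a failure if the given bitmap has any
// bit set in the range [tams, end).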
5380
5381 bool G1CollectedHeap::verify_bitmaps(const char* caller, HeapRegion* hr) {
5382 CMBitMapRO* prev_bitmap = concurrent_mark()->prevMarkBitMap();
5383 CMBitMapRO* next_bitmap = (CMBitMapRO*) concurrent_mark()->nextMarkBitMap();
5384
5385 HeapWord* bottom = hr->bottom();
5386 HeapWord* ptams = hr->prev_top_at_mark_start();
5387 HeapWord* ntams = hr->next_top_at_mark_start();
5388 HeapWord* end = hr->end();
5389
5390 bool res_p = verify_no_bits_over_tams("prev", prev_bitmap, ptams, end);
5391
5392 bool res_n = true;
5393 // We reset mark_in_progress() before we reset _cmThread->in_progress() and in this window
5394 // we do the clearing of the next bitmap concurrently. Thus, we can not verify the bitmap
5395 // if we happen to be in that state.
5396 if (collector_state()->mark_in_progress() || !_cmThread->in_progress()) {
5397 res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
5398 }
5399 if (!res_p || !res_n) {
5400 gclog_or_tty->print_cr("#### Bitmap verification failed for " HR_FORMAT,
5401 HR_FORMAT_PARAMS(hr));
5402 gclog_or_tty->print_cr("#### Caller: %s", caller);
5403 return false;
5404 }
5405 return true;
5406 }
5407
5408 void G1CollectedHeap::check_bitmaps(const char* caller, HeapRegion* hr) {
5409 if (!G1VerifyBitmaps) return;
5410
5411 guarantee(verify_bitmaps(caller, hr), "bitmap verification");
5412 }
5413
5414 class G1VerifyBitmapClosure : public HeapRegionClosure {
5415 private:
5416 const char* _caller;
5417 G1CollectedHeap* _g1h;
5418 bool _failures;
5419
5420 public:
5421 G1VerifyBitmapClosure(const char* caller, G1CollectedHeap* g1h) :
5422 _caller(caller), _g1h(g1h), _failures(false) { }
5434
5435 void G1CollectedHeap::check_bitmaps(const char* caller) {
5436 if (!G1VerifyBitmaps) return;
5437
5438 G1VerifyBitmapClosure cl(caller, this);
5439 heap_region_iterate(&cl);
5440 guarantee(!cl.failures(), "bitmap verification");
5441 }
5442
5443 class G1CheckCSetFastTableClosure : public HeapRegionClosure {
5444 private:
5445 bool _failures;
5446 public:
5447 G1CheckCSetFastTableClosure() : HeapRegionClosure(), _failures(false) { }
5448
5449 virtual bool doHeapRegion(HeapRegion* hr) {
5450 uint i = hr->hrm_index();
5451 InCSetState cset_state = (InCSetState) G1CollectedHeap::heap()->_in_cset_fast_test.get_by_index(i);
5452 if (hr->is_humongous()) {
5453 if (hr->in_collection_set()) {
5454 gclog_or_tty->print_cr("\n## humongous region %u in CSet", i);
5455 _failures = true;
5456 return true;
5457 }
5458 if (cset_state.is_in_cset()) {
5459 gclog_or_tty->print_cr("\n## inconsistent cset state %d for humongous region %u", cset_state.value(), i);
5460 _failures = true;
5461 return true;
5462 }
5463 if (hr->is_continues_humongous() && cset_state.is_humongous()) {
5464 gclog_or_tty->print_cr("\n## inconsistent cset state %d for continues humongous region %u", cset_state.value(), i);
5465 _failures = true;
5466 return true;
5467 }
5468 } else {
5469 if (cset_state.is_humongous()) {
5470 gclog_or_tty->print_cr("\n## inconsistent cset state %d for non-humongous region %u", cset_state.value(), i);
5471 _failures = true;
5472 return true;
5473 }
5474 if (hr->in_collection_set() != cset_state.is_in_cset()) {
5475 gclog_or_tty->print_cr("\n## in CSet %d / cset state %d inconsistency for region %u",
5476 hr->in_collection_set(), cset_state.value(), i);
5477 _failures = true;
5478 return true;
5479 }
5480 if (cset_state.is_in_cset()) {
5481 if (hr->is_young() != (cset_state.is_young())) {
5482 gclog_or_tty->print_cr("\n## is_young %d / cset state %d inconsistency for region %u",
5483 hr->is_young(), cset_state.value(), i);
5484 _failures = true;
5485 return true;
5486 }
5487 if (hr->is_old() != (cset_state.is_old())) {
5488 gclog_or_tty->print_cr("\n## is_old %d / cset state %d inconsistency for region %u",
5489 hr->is_old(), cset_state.value(), i);
5490 _failures = true;
5491 return true;
5492 }
5493 }
5494 }
5495 return false;
5496 }
5497
5498 bool failures() const { return _failures; }
5499 };
5500
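// Sanity check (sketch): walks all regions and, using the closure above,
// verifies that the per-region entries in _in_cset_fast_test agree with each
// region's humongous, in-collection-set, young and old flags.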
5501 bool G1CollectedHeap::check_cset_fast_test() {
5502 G1CheckCSetFastTableClosure cl;
5503 _hrm.iterate(&cl);
5504 return !cl.failures();
5505 }
5506 #endif // PRODUCT
5507
5508 void G1CollectedHeap::cleanUpCardTable() {
5678 // (i.e. it has "escaped" to an old object) this remembered set entry will stay
5679 // until the end of a concurrent mark.
5680 //
5681 // It is not required to check whether the object has been found dead by marking
5682 // or not; in fact, doing so would prevent reclamation within a concurrent cycle, as
5683 // all objects allocated during that time are considered live.
5684 // SATB marking is even more conservative than the remembered set.
5685 // So if at this point in the collection there is no remembered set entry,
5686 // nobody has a reference to it.
5687 // At the start of the collection we flush all refinement logs, and remembered
5688 // sets are completely up-to-date with regard to references to the humongous object.
5689 //
5690 // Other implementation considerations:
5691 //   - never consider object arrays at this time because they would require
5692 //     considerable effort to clean up the remembered sets. This is
5693 // required because stale remembered sets might reference locations that
5694 // are currently allocated into.
5695 uint region_idx = r->hrm_index();
5696 if (!g1h->is_humongous_reclaim_candidate(region_idx) ||
5697 !r->rem_set()->is_empty()) {
5698
5699 if (G1TraceEagerReclaimHumongousObjects) {
5700 gclog_or_tty->print_cr("Live humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
5701 region_idx,
5702 (size_t)obj->size() * HeapWordSize,
5703 p2i(r->bottom()),
5704 r->rem_set()->occupied(),
5705 r->rem_set()->strong_code_roots_list_length(),
5706 next_bitmap->isMarked(r->bottom()),
5707 g1h->is_humongous_reclaim_candidate(region_idx),
5708 obj->is_typeArray()
5709 );
5710 }
5711
5712 return false;
5713 }
5714
5715 guarantee(obj->is_typeArray(),
5716 "Only eagerly reclaiming type arrays is supported, but the object "
5717 PTR_FORMAT " is not.", p2i(r->bottom()));
5718
5719 if (G1TraceEagerReclaimHumongousObjects) {
5720 gclog_or_tty->print_cr("Dead humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
5721 region_idx,
5722 (size_t)obj->size() * HeapWordSize,
5723 p2i(r->bottom()),
5724 r->rem_set()->occupied(),
5725 r->rem_set()->strong_code_roots_list_length(),
5726 next_bitmap->isMarked(r->bottom()),
5727 g1h->is_humongous_reclaim_candidate(region_idx),
5728 obj->is_typeArray()
5729 );
5730 }
5731 // Need to clear mark bit of the humongous object if already set.
5732 if (next_bitmap->isMarked(r->bottom())) {
5733 next_bitmap->clear(r->bottom());
5734 }
5735 do {
5736 HeapRegion* next = g1h->next_region_in_humongous(r);
5737 _freed_bytes += r->used();
5738 r->set_containing_set(NULL);
5739 _humongous_regions_removed.increment(1u, r->capacity());
5740 g1h->free_humongous_region(r, _free_region_list, false);
5741 r = next;
5742 } while (r != NULL);
5743
5744 return false;
5745 }
5746
5747 HeapRegionSetCount& humongous_free_count() {
5748 return _humongous_regions_removed;
5749 }
5750
5751 size_t bytes_freed() const {
5752 return _freed_bytes;
5753 }
5754
5755 size_t humongous_reclaimed() const {
5756 return _humongous_regions_removed.length();
5757 }
5758 };
5759
5760 void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
5761 assert_at_safepoint(true);
5762
5763 if (!G1EagerReclaimHumongousObjects ||
5764 (!_has_humongous_reclaim_candidates && !G1TraceEagerReclaimHumongousObjects)) {
5765 g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);
5766 return;
5767 }
5768
5769 double start_time = os::elapsedTime();
5770
5771 FreeRegionList local_cleanup_list("Local Humongous Cleanup List");
5772
5773 G1FreeHumongousRegionClosure cl(&local_cleanup_list);
5774 heap_region_iterate(&cl);
5775
5776 HeapRegionSetCount empty_set;
5777 remove_from_old_sets(empty_set, cl.humongous_free_count());
5778
5779 G1HRPrinter* hrp = hr_printer();
5780 if (hrp->is_active()) {
5781 FreeRegionListIterator iter(&local_cleanup_list);
5782 while (iter.more_available()) {
5783 HeapRegion* hr = iter.get_next();
5784 hrp->cleanup(hr);
5797 // the current incremental collection set in preparation for a
5798 // full collection. After the full GC we will start to build up
5799 // the incremental collection set again.
5800 // This is only called when we're doing a full collection
5801 // and is immediately followed by the tearing down of the young list.
5802
5803 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
5804 HeapRegion* cur = cs_head;
5805
5806 while (cur != NULL) {
5807 HeapRegion* next = cur->next_in_collection_set();
5808 assert(cur->in_collection_set(), "bad CS");
5809 cur->set_next_in_collection_set(NULL);
5810 clear_in_cset(cur);
5811 cur->set_young_index_in_cset(-1);
5812 cur = next;
5813 }
5814 }
5815
5816 void G1CollectedHeap::set_free_regions_coming() {
5817 if (G1ConcRegionFreeingVerbose) {
5818 gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
5819 "setting free regions coming");
5820 }
5821
5822 assert(!free_regions_coming(), "pre-condition");
5823 _free_regions_coming = true;
5824 }
5825
5826 void G1CollectedHeap::reset_free_regions_coming() {
5827 assert(free_regions_coming(), "pre-condition");
5828
5829 {
5830 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
5831 _free_regions_coming = false;
5832 SecondaryFreeList_lock->notify_all();
5833 }
5834
5835 if (G1ConcRegionFreeingVerbose) {
5836 gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
5837 "reset free regions coming");
5838 }
5839 }
5840
5841 void G1CollectedHeap::wait_while_free_regions_coming() {
5842 // Most of the time we won't have to wait, so let's do a quick test
5843 // first before we take the lock.
5844 if (!free_regions_coming()) {
5845 return;
5846 }
5847
5848 if (G1ConcRegionFreeingVerbose) {
5849 gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
5850 "waiting for free regions");
5851 }
5852
5853 {
5854 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
5855 while (free_regions_coming()) {
5856 SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
5857 }
5858 }
5859
5860 if (G1ConcRegionFreeingVerbose) {
5861 gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
5862 "done waiting for free regions");
5863 }
5864 }
5865
5866 bool G1CollectedHeap::is_old_gc_alloc_region(HeapRegion* hr) {
5867 return _allocator->is_retained_old_region(hr);
5868 }
5869
5870 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
5871 _young_list->push_region(hr);
5872 }
5873
5874 class NoYoungRegionsClosure: public HeapRegionClosure {
5875 private:
5876 bool _success;
5877 public:
5878 NoYoungRegionsClosure() : _success(true) { }
5879 bool doHeapRegion(HeapRegion* r) {
5880 if (r->is_young()) {
5881 gclog_or_tty->print_cr("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
5882 p2i(r->bottom()), p2i(r->end()));
5883 _success = false;
5884 }
5885 return false;
5886 }
5887 bool success() { return _success; }
5888 };
5889
5890 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
5891 bool ret = _young_list->check_list_empty(check_sample);
5892
5893 if (check_heap) {
5894 NoYoungRegionsClosure closure;
5895 heap_region_iterate(&closure);
5896 ret = ret && closure.success();
5897 }
5898
5899 return ret;
5900 }
5901
6112 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
6113 size_t allocated_bytes,
6114 InCSetState dest) {
6115 bool during_im = collector_state()->during_initial_mark_pause();
6116 alloc_region->note_end_of_copying(during_im);
6117 g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
6118 if (dest.is_young()) {
6119 young_list()->add_survivor_region(alloc_region);
6120 } else {
6121 _old_set.add(alloc_region);
6122 }
6123 _hr_printer.retire(alloc_region);
6124 }
6125
6126 HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
6127 bool expanded = false;
6128 uint index = _hrm.find_highest_free(&expanded);
6129
6130 if (index != G1_NO_HRM_INDEX) {
6131 if (expanded) {
6132 ergo_verbose1(ErgoHeapSizing,
6133 "attempt heap expansion",
6134 ergo_format_reason("requested address range outside heap bounds")
6135 ergo_format_byte("region size"),
6136 HeapRegion::GrainWords * HeapWordSize);
6137 }
6138 _hrm.allocate_free_regions_starting_at(index, 1);
6139 return region_at(index);
6140 }
6141 return NULL;
6142 }
6143
6144 // Heap region set verification
6145
6146 class VerifyRegionListsClosure : public HeapRegionClosure {
6147 private:
6148 HeapRegionSet* _old_set;
6149 HeapRegionSet* _humongous_set;
6150 HeapRegionManager* _hrm;
6151
6152 public:
6153 HeapRegionSetCount _old_count;
6154 HeapRegionSetCount _humongous_count;
6155 HeapRegionSetCount _free_count;
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/metadataOnStackMark.hpp"
27 #include "classfile/stringTable.hpp"
28 #include "code/codeCache.hpp"
29 #include "code/icBuffer.hpp"
30 #include "gc/g1/bufferingOopClosure.hpp"
31 #include "gc/g1/concurrentG1Refine.hpp"
32 #include "gc/g1/concurrentG1RefineThread.hpp"
33 #include "gc/g1/concurrentMarkThread.inline.hpp"
34 #include "gc/g1/g1Allocator.inline.hpp"
35 #include "gc/g1/g1CollectedHeap.inline.hpp"
36 #include "gc/g1/g1CollectorPolicy.hpp"
37 #include "gc/g1/g1CollectorState.hpp"
38 #include "gc/g1/g1EvacFailure.hpp"
39 #include "gc/g1/g1GCPhaseTimes.hpp"
40 #include "gc/g1/g1MarkSweep.hpp"
41 #include "gc/g1/g1OopClosures.inline.hpp"
42 #include "gc/g1/g1ParScanThreadState.inline.hpp"
43 #include "gc/g1/g1RegionToSpaceMapper.hpp"
44 #include "gc/g1/g1RemSet.inline.hpp"
45 #include "gc/g1/g1RootClosures.hpp"
46 #include "gc/g1/g1RootProcessor.hpp"
47 #include "gc/g1/g1StringDedup.hpp"
48 #include "gc/g1/g1YCTypes.hpp"
49 #include "gc/g1/heapRegion.inline.hpp"
50 #include "gc/g1/heapRegionRemSet.hpp"
51 #include "gc/g1/heapRegionSet.inline.hpp"
52 #include "gc/g1/suspendibleThreadSet.hpp"
53 #include "gc/g1/vm_operations_g1.hpp"
54 #include "gc/shared/gcHeapSummary.hpp"
55 #include "gc/shared/gcId.hpp"
56 #include "gc/shared/gcLocker.inline.hpp"
57 #include "gc/shared/gcTimer.hpp"
58 #include "gc/shared/gcTrace.hpp"
59 #include "gc/shared/gcTraceTime.hpp"
60 #include "gc/shared/generationSpec.hpp"
61 #include "gc/shared/isGCActiveMark.hpp"
62 #include "gc/shared/referenceProcessor.hpp"
63 #include "gc/shared/taskqueue.inline.hpp"
64 #include "logging/log.hpp"
65 #include "memory/allocation.hpp"
66 #include "memory/iterator.hpp"
67 #include "oops/oop.inline.hpp"
68 #include "runtime/atomic.inline.hpp"
69 #include "runtime/init.hpp"
70 #include "runtime/orderAccess.inline.hpp"
71 #include "runtime/vmThread.hpp"
72 #include "utilities/globalDefinitions.hpp"
73 #include "utilities/stack.inline.hpp"
74
75 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
76
77 // INVARIANTS/NOTES
78 //
79 // All allocation activity covered by the G1CollectedHeap interface is
80 // serialized by acquiring the HeapLock. This happens in mem_allocate
81 // and allocate_new_tlab, which are the "entry" points to the
82 // allocation code from the rest of the JVM. (Note that this does not
83 // apply to TLAB allocation, which is not part of this interface: it
84 // is done by clients of this interface.)
186 } while (hr != head);
187 assert(hr != NULL, "invariant");
188 hr->set_next_dirty_cards_region(NULL);
189 return hr;
190 }
191
192 // Returns true if the reference points to an object that
193 // can move in an incremental collection.
194 bool G1CollectedHeap::is_scavengable(const void* p) {
195 HeapRegion* hr = heap_region_containing(p);
196 return !hr->is_pinned();
197 }
198
199 // Private methods.
200
201 HeapRegion*
202 G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
203 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
204 while (!_secondary_free_list.is_empty() || free_regions_coming()) {
205 if (!_secondary_free_list.is_empty()) {
206 log_develop(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
207 "secondary_free_list has %u entries",
208 _secondary_free_list.length());
209 // It looks as if there are free regions available on the
210 // secondary_free_list. Let's move them to the free_list and try
211 // again to allocate from it.
212 append_secondary_free_list();
213
214 assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not "
215 "empty we should have moved at least one entry to the free_list");
216 HeapRegion* res = _hrm.allocate_free_region(is_old);
217 log_develop(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
218 "allocated " HR_FORMAT " from secondary_free_list",
219 HR_FORMAT_PARAMS(res));
220 return res;
221 }
222
223 // Wait here until we get notified either when (a) there are no
224 // more free regions coming or (b) some regions have been moved onto
225 // the secondary_free_list.
226 SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
227 }
228
229 log_develop(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
230 "could not allocate from secondary_free_list");
231 return NULL;
232 }
233
234 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
235 assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
236 "the only time we use this to allocate a humongous region is "
237 "when we are allocating a single humongous region");
238
239 HeapRegion* res;
240 if (G1StressConcRegionFreeing) {
241 if (!_secondary_free_list.is_empty()) {
242 log_develop(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
243 "forced to look at the secondary_free_list");
244 res = new_region_try_secondary_free_list(is_old);
245 if (res != NULL) {
246 return res;
247 }
248 }
249 }
250
251 res = _hrm.allocate_free_region(is_old);
252
253 if (res == NULL) {
254 log_develop(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
255 "res == NULL, trying the secondary_free_list");
256 res = new_region_try_secondary_free_list(is_old);
257 }
258 if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
259 // Currently, only attempts to allocate GC alloc regions set
260 // do_expand to true. So, we should only reach here during a
261 // safepoint. If this assumption changes we might have to
262 // reconsider the use of _expand_heap_after_alloc_failure.
263 assert(SafepointSynchronize::is_at_safepoint(), "invariant");
264
265 log_debug(gc, ergo, heap)("Attempt heap expansion (region allocation request failed). Allocation request: " SIZE_FORMAT "B",
266 word_size * HeapWordSize);
267
268 if (expand(word_size * HeapWordSize)) {
269 // Given that expand() succeeded in expanding the heap, and we
270 // always expand the heap by an amount aligned to the heap
271 // region size, the free list should in theory not be empty.
272 // In either case allocate_free_region() will check for NULL.
273 res = _hrm.allocate_free_region(is_old);
274 } else {
275 _expand_heap_after_alloc_failure = false;
276 }
277 }
278 return res;
279 }
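// Allocation order used by new_region() above (summary): first the master
// free list, then the secondary free list populated by concurrent cleanup,
// and finally, if do_expand is set and a previous expansion attempt has not
// already failed, an attempt to expand the heap by at least one region.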
280
281 HeapWord*
282 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
283 uint num_regions,
284 size_t word_size,
285 AllocationContext_t context) {
286 assert(first != G1_NO_HRM_INDEX, "pre-condition");
287 assert(is_humongous(word_size), "word_size should be humongous");
418 // potentially waits for regions from the secondary free list.
419 wait_while_free_regions_coming();
420 append_secondary_free_list_if_not_empty_with_lock();
421
422 // Policy: Try only empty regions (i.e. already committed first). Maybe we
423 // are lucky enough to find some.
424 first = _hrm.find_contiguous_only_empty(obj_regions);
425 if (first != G1_NO_HRM_INDEX) {
426 _hrm.allocate_free_regions_starting_at(first, obj_regions);
427 }
428 }
429
430 if (first == G1_NO_HRM_INDEX) {
431 // Policy: We could not find enough regions for the humongous object in the
432 // free list. Look through the heap to find a mix of free and uncommitted regions.
433     // If such a range is found, try expansion.
434 first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
435 if (first != G1_NO_HRM_INDEX) {
436 // We found something. Make sure these regions are committed, i.e. expand
437 // the heap. Alternatively we could do a defragmentation GC.
438 log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B",
439 word_size * HeapWordSize);
440
441
442 _hrm.expand_at(first, obj_regions);
443 g1_policy()->record_new_heap_size(num_regions());
444
445 #ifdef ASSERT
446 for (uint i = first; i < first + obj_regions; ++i) {
447 HeapRegion* hr = region_at(i);
448 assert(hr->is_free(), "sanity");
449 assert(hr->is_empty(), "sanity");
450 assert(is_on_master_free_list(hr), "sanity");
451 }
452 #endif
453 _hrm.allocate_free_regions_starting_at(first, obj_regions);
454 } else {
455 // Policy: Potentially trigger a defragmentation GC.
456 }
457 }
458
459 HeapWord* result = NULL;
460 if (first != G1_NO_HRM_INDEX) {
461 result = humongous_obj_allocate_initialize_regions(first, obj_regions,
739 HeapRegion* start_region = _hrm.addr_to_region(start_address);
740 if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
741 start_address = start_region->end();
742 if (start_address > last_address) {
743 increase_used(word_size * HeapWordSize);
744 start_region->set_top(last_address + 1);
745 continue;
746 }
747 start_region->set_top(start_address);
748 curr_range = MemRegion(start_address, last_address + 1);
749 start_region = _hrm.addr_to_region(start_address);
750 }
751
752 // Perform the actual region allocation, exiting if it fails.
753 // Then note how much new space we have allocated.
754 if (!_hrm.allocate_containing_regions(curr_range, &commits)) {
755 return false;
756 }
757 increase_used(word_size * HeapWordSize);
758 if (commits != 0) {
759 log_debug(gc, ergo, heap)("Attempt heap expansion (allocate archive regions). Total size: " SIZE_FORMAT "B",
760 HeapRegion::GrainWords * HeapWordSize * commits);
761
762 }
763
764 // Mark each G1 region touched by the range as archive, add it to the old set,
765 // and set the allocation context and top.
766 HeapRegion* curr_region = _hrm.addr_to_region(start_address);
767 HeapRegion* last_region = _hrm.addr_to_region(last_address);
768 prev_last_region = last_region;
769
770 while (curr_region != NULL) {
771 assert(curr_region->is_empty() && !curr_region->is_pinned(),
772 "Region already in use (index %u)", curr_region->hrm_index());
773 _hr_printer.alloc(curr_region, G1HRPrinter::Archive);
774 curr_region->set_allocation_context(AllocationContext::system());
775 curr_region->set_archive();
776 _old_set.add(curr_region);
777 if (curr_region != last_region) {
778 curr_region->set_top(curr_region->end());
779 curr_region = _hrm.next_region_in_heap(curr_region);
780 } else {
781 curr_region->set_top(last_address + 1);
922 guarantee(curr_region->is_archive(),
923 "Expected archive region at index %u", curr_region->hrm_index());
924 uint curr_index = curr_region->hrm_index();
925 _old_set.remove(curr_region);
926 curr_region->set_free();
927 curr_region->set_top(curr_region->bottom());
928 if (curr_region != last_region) {
929 curr_region = _hrm.next_region_in_heap(curr_region);
930 } else {
931 curr_region = NULL;
932 }
933 _hrm.shrink_at(curr_index, 1);
934 uncommitted_regions++;
935 }
936
937 // Notify mark-sweep that this is no longer an archive range.
938 G1MarkSweep::set_range_archive(ranges[i], false);
939 }
940
941 if (uncommitted_regions != 0) {
942 log_debug(gc, ergo, heap)("Attempt heap shrinking (uncommitted archive regions). Total size: " SIZE_FORMAT "B",
943 HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
944 }
945 decrease_used(size_used);
946 }
947
948 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
949 uint* gc_count_before_ret,
950 uint* gclocker_retry_count_ret) {
951 // The structure of this method has a lot of similarities to
952 // attempt_allocation_slow(). The reason these two were not merged
953 // into a single one is that such a method would require several "if
954 // allocation is not humongous do this, otherwise do that"
955 // conditional paths which would obscure its flow. In fact, an early
956 // version of this code did use a unified method which was harder to
957 // follow and, as a result, it had subtle bugs that were hard to
958 // track down. So keeping these two methods separate allows each to
959 // be more readable. It will be good to keep these two in sync as
960 // much as possible.
961
962 assert_heap_not_locked_and_not_at_safepoint();
1160 // We only generate output for non-empty regions.
1161 } else if (hr->is_starts_humongous()) {
1162 _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
1163 } else if (hr->is_continues_humongous()) {
1164 _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
1165 } else if (hr->is_archive()) {
1166 _hr_printer->post_compaction(hr, G1HRPrinter::Archive);
1167 } else if (hr->is_old()) {
1168 _hr_printer->post_compaction(hr, G1HRPrinter::Old);
1169 } else {
1170 ShouldNotReachHere();
1171 }
1172 return false;
1173 }
1174
1175 PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
1176 : _hr_printer(hr_printer) { }
1177 };
1178
1179 void G1CollectedHeap::print_hrm_post_compaction() {
1180 if (_hr_printer.is_active()) {
1181 PostCompactionPrinterClosure cl(hr_printer());
1182 heap_region_iterate(&cl);
1183 }
1184
1185 }
1186
1187 bool G1CollectedHeap::do_collection(bool explicit_gc,
1188 bool clear_all_soft_refs,
1189 size_t word_size) {
1190 assert_at_safepoint(true /* should_be_vm_thread */);
1191
1192 if (GC_locker::check_active_before_gc()) {
1193 return false;
1194 }
1195
1196 STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
1197 gc_timer->register_gc_start();
1198
1199 SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
1200 GCIdMark gc_id_mark;
1201 gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
1202
1203 SvcGCMarker sgcm(SvcGCMarker::FULL);
1204 ResourceMark rm;
1205
1206 print_heap_before_gc();
1207 trace_heap_before_gc(gc_tracer);
1208
1209 size_t metadata_prev_used = MetaspaceAux::used_bytes();
1210
1211 verify_region_sets_optional();
1212
1213 const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1214 collector_policy()->should_clear_all_soft_refs();
1215
1216 ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
1217
1218 {
1219 IsGCActiveMark x;
1220
1221 // Timing
1222 assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
1223 GCTraceCPUTime tcpu;
1224
1225 {
1226 GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
1227 TraceCollectorStats tcs(g1mm()->full_collection_counters());
1228 TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1229
1230 g1_policy()->record_full_collection_start();
1231
1232 // Note: When we have a more flexible GC logging framework that
1233 // allows us to add optional attributes to a GC log record we
1234 // could consider timing and reporting how long we wait in the
1235 // following two methods.
1236 wait_while_free_regions_coming();
1237 // If we start the compaction before the CM threads finish
1238 // scanning the root regions we might trip them over as we'll
1239 // be moving objects / updating references. So let's wait until
1240 // they are done. By telling them to abort, they should complete
1241 // early.
1242 _cm->root_regions()->abort();
1243 _cm->root_regions()->wait_until_scan_finished();
1244 append_secondary_free_list_if_not_empty_with_lock();
1245
1246 gc_prologue(true);
1257 #if defined(COMPILER2) || INCLUDE_JVMCI
1258 DerivedPointerTable::clear();
1259 #endif
1260
1261 // Disable discovery and empty the discovered lists
1262 // for the CM ref processor.
1263 ref_processor_cm()->disable_discovery();
1264 ref_processor_cm()->abandon_partial_discovery();
1265 ref_processor_cm()->verify_no_references_recorded();
1266
1267 // Abandon current iterations of concurrent marking and concurrent
1268 // refinement, if any are in progress. We have to do this before
1269 // wait_until_scan_finished() below.
1270 concurrent_mark()->abort();
1271
1272 // Make sure we'll choose a new allocation region afterwards.
1273 _allocator->release_mutator_alloc_region();
1274 _allocator->abandon_gc_alloc_regions();
1275 g1_rem_set()->cleanupHRRS();
1276
1277 // We may have added regions to the current incremental collection
1278 // set between the last GC or pause and now. We need to clear the
1279 // incremental collection set and then start rebuilding it afresh
1280 // after this full GC.
1281 abandon_collection_set(g1_policy()->inc_cset_head());
1282 g1_policy()->clear_incremental_cset();
1283 g1_policy()->stop_incremental_cset_building();
1284
1285 tear_down_region_sets(false /* free_list_only */);
1286 collector_state()->set_gcs_are_young(true);
1287
1288 // See the comments in g1CollectedHeap.hpp and
1289 // G1CollectedHeap::ref_processing_init() about
1290 // how reference processing currently works in G1.
1291
1292 // Temporarily make discovery by the STW ref processor single threaded (non-MT).
1293 ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
1294
1295 // Temporarily clear the STW ref processor's _is_alive_non_header field.
1296 ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
1324 ClassLoaderDataGraph::purge();
1325 MetaspaceAux::verify_metrics();
1326
1327 // Note: since we've just done a full GC, concurrent
1328 // marking is no longer active. Therefore we need not
1329 // re-enable reference discovery for the CM ref processor.
1330 // That will be done at the start of the next marking cycle.
1331 assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
1332 ref_processor_cm()->verify_no_references_recorded();
1333
1334 reset_gc_time_stamp();
1335 // Since everything potentially moved, we will clear all remembered
1336 // sets, and clear all cards. Later we will rebuild remembered
1337 // sets. We will also reset the GC time stamps of the regions.
1338 clear_rsets_post_compaction();
1339 check_gc_time_stamps();
1340
1341 // Resize the heap if necessary.
1342 resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
1343
1344 // We should do this after we potentially resize the heap so
1345 // that all the COMMIT / UNCOMMIT events are generated before
1346 // the compaction events.
1347 print_hrm_post_compaction();
1348
1349 G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
1350 if (hot_card_cache->use_cache()) {
1351 hot_card_cache->reset_card_counts();
1352 hot_card_cache->reset_hot_cache();
1353 }
1354
1355 // Rebuild remembered sets of all regions.
1356 uint n_workers =
1357 AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
1358 workers()->active_workers(),
1359 Threads::number_of_non_daemon_threads());
1360 workers()->set_active_workers(n_workers);
1361
1362 ParRebuildRSTask rebuild_rs_task(this);
1363 workers()->run_task(&rebuild_rs_task);
1364
1365 // Rebuild the strong code root lists for each region
1366 rebuild_strong_code_roots();
1367
1396 // ConcurrentMark::abort() above since VerifyDuringGC verifies the
1397 // objects marked during a full GC against the previous bitmap.
1398 // But we need to clear it before calling check_bitmaps below since
1399 // the full GC has compacted objects and updated TAMS but not updated
1400 // the prev bitmap.
1401 if (G1VerifyBitmaps) {
1402 ((CMBitMap*) concurrent_mark()->prevMarkBitMap())->clearAll();
1403 }
1404 check_bitmaps("Full GC End");
1405
1406 // Start a new incremental collection set for the next pause
1407 assert(g1_policy()->collection_set() == NULL, "must be");
1408 g1_policy()->start_incremental_cset_building();
1409
1410 clear_cset_fast_test();
1411
1412 _allocator->init_mutator_alloc_region();
1413
1414 g1_policy()->record_full_collection_end();
1415
1416 // We must call G1MonitoringSupport::update_sizes() in the same scoping level
1417 // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
1418 // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
1419 // before any GC notifications are raised.
1420 g1mm()->update_sizes();
1421
1422 gc_epilogue(true);
1423 }
1424
1425 g1_policy()->print_detailed_heap_transition();
1426
1427 print_heap_after_gc();
1428 trace_heap_after_gc(gc_tracer);
1429
1430 post_full_gc_dump(gc_timer);
1431
1432 gc_timer->register_gc_end();
1433 gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1434 }
1435
1436 return true;
1437 }
1438
1439 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1440 // do_collection() will return whether it succeeded in performing
1441 // the GC. Currently, there is no facility on the
1442 // do_full_collection() API to notify the caller than the collection
1443 // did not succeed (e.g., because it was locked out by the GC
1444 // locker). So, right now, we'll ignore the return value.
1445 bool dummy = do_collection(true, /* explicit_gc */
1490
1491 // This assert only makes sense here, before we adjust them
1492 // with respect to the min and max heap size.
1493 assert(minimum_desired_capacity <= maximum_desired_capacity,
1494 "minimum_desired_capacity = " SIZE_FORMAT ", "
1495 "maximum_desired_capacity = " SIZE_FORMAT,
1496 minimum_desired_capacity, maximum_desired_capacity);
1497
1498 // Should not be greater than the heap max size. No need to adjust
1499 // it with respect to the heap min size as it's a lower bound (i.e.,
1500 // we'll try to make the capacity larger than it, not smaller).
1501 minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
1502 // Should not be less than the heap min size. No need to adjust it
1503 // with respect to the heap max size as it's an upper bound (i.e.,
1504 // we'll try to make the capacity smaller than it, not greater).
1505 maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size);
1506
1507 if (capacity_after_gc < minimum_desired_capacity) {
1508 // Don't expand unless it's significant
1509 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
1510
1511 log_debug(gc, ergo, heap)("Attempt heap expansion (capacity lower than min desired capacity after Full GC). "
1512 "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B min_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
1513 capacity_after_gc, used_after_gc, minimum_desired_capacity, MinHeapFreeRatio);
1514
1515 expand(expand_bytes);
1516
1517 // No expansion, now see if we want to shrink
1518 } else if (capacity_after_gc > maximum_desired_capacity) {
1519 // Capacity too large, compute shrinking size
1520 size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
1521
1522 log_debug(gc, ergo, heap)("Attempt heap shrinking (capacity higher than max desired capacity after Full GC). "
1523 "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B max_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
1524 capacity_after_gc, used_after_gc, maximum_desired_capacity, MaxHeapFreeRatio);
1525
1526 shrink(shrink_bytes);
1527 }
1528 }
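// Illustrative sizing example (assuming the usual HotSpot free-ratio based
// computation of the desired capacities, which is elided above): with
// used_after_gc = 600 MB, MinHeapFreeRatio = 40 and MaxHeapFreeRatio = 70,
// minimum_desired_capacity ~= 600 MB / (1 - 0.40) = 1000 MB and
// maximum_desired_capacity ~= 600 MB / (1 - 0.70) = 2000 MB. A
// capacity_after_gc of 800 MB would then trigger a ~200 MB expansion, while
// a capacity of 2500 MB would trigger a ~500 MB shrink.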
1529
1530 HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
1531 AllocationContext_t context,
1532 bool do_gc,
1533 bool clear_all_soft_refs,
1534 bool expect_null_mutator_alloc_region,
1535 bool* gc_succeeded) {
1536 *gc_succeeded = true;
1537 // Let's attempt the allocation first.
1538 HeapWord* result =
1539 attempt_allocation_at_safepoint(word_size,
1540 context,
1541 expect_null_mutator_alloc_region);
1542 if (result != NULL) {
1543 assert(*gc_succeeded, "sanity");
1544 return result;
1545 }
1612
1613 // What else? We might try synchronous finalization later. If the total
1614 // space available is large enough for the allocation, then a more
1615 // complete compaction phase than we've tried so far might be
1616 // appropriate.
1617 assert(*succeeded, "sanity");
1618 return NULL;
1619 }
1620
1621 // Attempt to expand the heap sufficiently
1622 // to support an allocation of the given "word_size". If
1623 // successful, perform the allocation and return the address of the
1624 // allocated block, or else return "NULL".
1625
1626 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) {
1627 assert_at_safepoint(true /* should_be_vm_thread */);
1628
1629 verify_region_sets_optional();
1630
1631 size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1632 log_debug(gc, ergo, heap)("Attempt heap expansion (allocation request failed). Allocation request: " SIZE_FORMAT "B",
1633 word_size * HeapWordSize);
1634
1635
1636 if (expand(expand_bytes)) {
1637 _hrm.verify_optional();
1638 verify_region_sets_optional();
1639 return attempt_allocation_at_safepoint(word_size,
1640 context,
1641 false /* expect_null_mutator_alloc_region */);
1642 }
1643 return NULL;
1644 }
1645
1646 bool G1CollectedHeap::expand(size_t expand_bytes, double* expand_time_ms) {
1647 size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1648 aligned_expand_bytes = align_size_up(aligned_expand_bytes,
1649 HeapRegion::GrainBytes);
1650
1651 log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount:" SIZE_FORMAT "B expansion amount:" SIZE_FORMAT "B",
1652 expand_bytes, aligned_expand_bytes);
1653
1654 if (is_maximal_no_gc()) {
1655 log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
1656 return false;
1657 }
1658
1659 double expand_heap_start_time_sec = os::elapsedTime();
1660 uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
1661 assert(regions_to_expand > 0, "Must expand by at least one region");
1662
1663 uint expanded_by = _hrm.expand_by(regions_to_expand);
1664 if (expand_time_ms != NULL) {
1665 *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
1666 }
1667
1668 if (expanded_by > 0) {
1669 size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
1670 assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1671 g1_policy()->record_new_heap_size(num_regions());
1672 } else {
1673 log_debug(gc, ergo, heap)("Did not expand the heap (heap expansion operation failed)");
1674
1675 // The expansion of the virtual storage space was unsuccessful.
1676 // Let's see if it was because we ran out of swap.
1677 if (G1ExitOnExpansionFailure &&
1678 _hrm.available() >= regions_to_expand) {
1679 // We had head room...
1680 vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
1681 }
1682 }
1683 return regions_to_expand > 0;
1684 }
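// Illustrative example of the alignment above: with HeapRegion::GrainBytes of
// 1 MB (a hypothetical region size), an expand_bytes request of 1.5 MB is
// first page-aligned and then rounded up to 2 MB, i.e. regions_to_expand = 2;
// _hrm.expand_by() may still commit fewer regions than requested.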
1685
1686 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1687 size_t aligned_shrink_bytes =
1688 ReservedSpace::page_align_size_down(shrink_bytes);
1689 aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
1690 HeapRegion::GrainBytes);
1691 uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
1692
1693 uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
1694 size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
1695
1696
1697 log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",
1698 shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
1699 if (num_regions_removed > 0) {
1700 g1_policy()->record_new_heap_size(num_regions());
1701 } else {
1702 log_debug(gc, ergo, heap)("Did not shrink the heap (heap shrinking operation failed)");
1703 }
1704 }
1705
1706 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1707 verify_region_sets_optional();
1708
1709 // We should only reach here at the end of a Full GC which means we
1710 // should not not be holding to any GC alloc regions. The method
1711 // below will make sure of that and do any remaining clean up.
1712 _allocator->abandon_gc_alloc_regions();
1713
1714 // Instead of tearing down / rebuilding the free lists here, we
1715 // could use the remove_all_pending() method on free_list to
1716 // remove only the ones that we need to remove.
1717 tear_down_region_sets(true /* free_list_only */);
1718 shrink_helper(shrink_bytes);
1719 rebuild_region_sets(true /* free_list_only */);
1720
1721 _hrm.verify_optional();
1722 verify_region_sets_optional();
1794 // Initialize the G1EvacuationFailureALot counters and flags.
1795 NOT_PRODUCT(reset_evacuation_should_fail();)
1796
1797 guarantee(_task_queues != NULL, "task_queues allocation failure.");
1798 }
1799
1800 G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description,
1801 size_t size,
1802 size_t translation_factor) {
1803 size_t preferred_page_size = os::page_size_for_region_unaligned(size, 1);
1804 // Allocate a new reserved space, preferring to use large pages.
1805 ReservedSpace rs(size, preferred_page_size);
1806 G1RegionToSpaceMapper* result =
1807 G1RegionToSpaceMapper::create_mapper(rs,
1808 size,
1809 rs.alignment(),
1810 HeapRegion::GrainBytes,
1811 translation_factor,
1812 mtGC);
1813 if (TracePageSizes) {
1814 tty->print_cr("G1 '%s': pg_sz=" SIZE_FORMAT " base=" PTR_FORMAT " size=" SIZE_FORMAT " alignment=" SIZE_FORMAT " reqsize=" SIZE_FORMAT,
1815 description, preferred_page_size, p2i(rs.base()), rs.size(), rs.alignment(), size);
1816 }
1817 return result;
1818 }
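// Hypothetical usage sketch (names are illustrative, not taken from this
// file): a caller, e.g. during heap initialization, might create one such
// mapper per auxiliary data structure that is sized proportionally to the
// heap, along the lines of
//
//   G1RegionToSpaceMapper* bitmap_storage =
//     create_aux_memory_mapper("Mark bitmap", bitmap_size_in_bytes, translation_factor);
//
// where translation_factor describes how many heap bytes each byte of the
// auxiliary structure covers.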
1819
1820 jint G1CollectedHeap::initialize() {
1821 CollectedHeap::pre_initialize();
1822 os::enable_vtime();
1823
1824 // Necessary to satisfy locking discipline assertions.
1825
1826 MutexLocker x(Heap_lock);
1827
1828 // While there are no constraints in the GC code that HeapWordSize
1829 // be any particular value, there are multiple other areas in the
1830 // system which believe this to be true (e.g. oop->object_size in some
1831 // cases incorrectly returns the size in wordSize units rather than
1832 // HeapWordSize).
1833 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1834
1835 size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1836 size_t max_byte_size = collector_policy()->max_heap_byte_size();
1837 size_t heap_alignment = collector_policy()->heap_alignment();
1838
1839 // Ensure that the sizes are properly aligned.
1840 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1841 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1842 Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
1843
1844 _refine_cte_cl = new RefineCardTableEntryClosure();
1845
1846 jint ecode = JNI_OK;
1847 _cg1r = ConcurrentG1Refine::create(this, _refine_cte_cl, &ecode);
2000 G1AllocRegion::setup(this, dummy_region);
2001
2002 _allocator->init_mutator_alloc_region();
2003
2004 // Create the monitoring and management support so that
2005 // values in the heap have been properly initialized.
2006 _g1mm = new G1MonitoringSupport(this);
2007
2008 G1StringDedup::initialize();
2009
2010 _preserved_objs = NEW_C_HEAP_ARRAY(OopAndMarkOopStack, ParallelGCThreads, mtGC);
2011 for (uint i = 0; i < ParallelGCThreads; i++) {
2012 new (&_preserved_objs[i]) OopAndMarkOopStack();
2013 }
2014
2015 return JNI_OK;
2016 }
2017
2018 void G1CollectedHeap::stop() {
2019 // Stop all concurrent threads. We do this to make sure these threads
2020 // do not continue to execute and access resources (e.g. logging)
2021 // that are destroyed during shutdown.
2022 _cg1r->stop();
2023 _cmThread->stop();
2024 if (G1StringDedup::is_enabled()) {
2025 G1StringDedup::stop();
2026 }
2027 }
2028
2029 size_t G1CollectedHeap::conservative_max_heap_alignment() {
2030 return HeapRegion::max_region_size();
2031 }
2032
2033 void G1CollectedHeap::post_initialize() {
2034 CollectedHeap::post_initialize();
2035 ref_processing_init();
2036 }
2037
2038 void G1CollectedHeap::ref_processing_init() {
2039 // Reference processing in G1 currently works as follows:
2040 //
2117 }
2118
2119 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
2120 hr->reset_gc_time_stamp();
2121 }
2122
2123 #ifndef PRODUCT
2124
2125 class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
2126 private:
2127 unsigned _gc_time_stamp;
2128 bool _failures;
2129
2130 public:
2131 CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
2132 _gc_time_stamp(gc_time_stamp), _failures(false) { }
2133
2134 virtual bool doHeapRegion(HeapRegion* hr) {
2135 unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
2136 if (_gc_time_stamp != region_gc_time_stamp) {
2137 log_info(gc, verify)("Region " HR_FORMAT " has GC time stamp = %d, expected %d", HR_FORMAT_PARAMS(hr),
2138 region_gc_time_stamp, _gc_time_stamp);
2139 _failures = true;
2140 }
2141 return false;
2142 }
2143
2144 bool failures() { return _failures; }
2145 };
2146
2147 void G1CollectedHeap::check_gc_time_stamps() {
2148 CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
2149 heap_region_iterate(&cl);
2150 guarantee(!cl.failures(), "all GC time stamps should have been reset");
2151 }
2152 #endif // PRODUCT
2153
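// Apply the given closure to all card entries currently buffered in the hot card cache.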
2154 void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
2155 _cg1r->hot_card_cache()->drain(cl, worker_i);
2156 }
2157
2701 private:
2702 G1CollectedHeap* _g1h;
2703 VerifyOption _vo;
2704 bool _failures;
2705 public:
2706 // _vo == UsePrevMarking -> use "prev" marking information,
2707 // _vo == UseNextMarking -> use "next" marking information,
2708 // _vo == UseMarkWord -> use mark word from object header.
2709 VerifyRootsClosure(VerifyOption vo) :
2710 _g1h(G1CollectedHeap::heap()),
2711 _vo(vo),
2712 _failures(false) { }
2713
2714 bool failures() { return _failures; }
2715
2716 template <class T> void do_oop_nv(T* p) {
2717 T heap_oop = oopDesc::load_heap_oop(p);
2718 if (!oopDesc::is_null(heap_oop)) {
2719 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2720 if (_g1h->is_obj_dead_cond(obj, _vo)) {
2721 LogHandle(gc, verify) log;
2722 log.info("Root location " PTR_FORMAT " points to dead obj " PTR_FORMAT, p2i(p), p2i(obj));
2723 if (_vo == VerifyOption_G1UseMarkWord) {
2724 log.info(" Mark word: " INTPTR_FORMAT, (intptr_t)obj->mark());
2725 }
2726 ResourceMark rm;
2727 obj->print_on(log.info_stream());
2728 _failures = true;
2729 }
2730 }
2731 }
2732
2733 void do_oop(oop* p) { do_oop_nv(p); }
2734 void do_oop(narrowOop* p) { do_oop_nv(p); }
2735 };
2736
2737 class G1VerifyCodeRootOopClosure: public OopClosure {
2738 G1CollectedHeap* _g1h;
2739 OopClosure* _root_cl;
2740 nmethod* _nm;
2741 VerifyOption _vo;
2742 bool _failures;
2743
2744 template <class T> void do_oop_work(T* p) {
2745 // First verify that this root is live
2746 _root_cl->do_oop(p);
2747
2752
2753 // Don't check the code roots during marking verification in a full GC
2754 if (_vo == VerifyOption_G1UseMarkWord) {
2755 return;
2756 }
2757
2758 // Now verify that the current nmethod (which contains p) is
2759 // in the code root list of the heap region containing the
2760 // object referenced by p.
2761
2762 T heap_oop = oopDesc::load_heap_oop(p);
2763 if (!oopDesc::is_null(heap_oop)) {
2764 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2765
2766 // Now fetch the region containing the object
2767 HeapRegion* hr = _g1h->heap_region_containing(obj);
2768 HeapRegionRemSet* hrrs = hr->rem_set();
2769 // Verify that the strong code root list for this region
2770 // contains the nmethod
2771 if (!hrrs->strong_code_roots_list_contains(_nm)) {
2772 log_info(gc, verify)("Code root location " PTR_FORMAT " "
2773 "from nmethod " PTR_FORMAT " not in strong "
2774 "code roots for region [" PTR_FORMAT "," PTR_FORMAT ")",
2775 p2i(p), p2i(_nm), p2i(hr->bottom()), p2i(hr->end()));
2776 _failures = true;
2777 }
2778 }
2779 }
2780
2781 public:
2782 G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo):
2783 _g1h(g1h), _root_cl(root_cl), _vo(vo), _nm(NULL), _failures(false) {}
2784
2785 void do_oop(oop* p) { do_oop_work(p); }
2786 void do_oop(narrowOop* p) { do_oop_work(p); }
2787
2788 void set_nmethod(nmethod* nm) { _nm = nm; }
2789 bool failures() { return _failures; }
2790 };
2791
2792 class G1VerifyCodeRootBlobClosure: public CodeBlobClosure {
2933 }
2934
2935 bool doHeapRegion(HeapRegion* r) {
2936 // For archive regions, verify there are no heap pointers to
2937 // non-pinned regions. For all others, verify liveness info.
2938 if (r->is_archive()) {
2939 VerifyArchiveRegionClosure verify_oop_pointers(r);
2940 r->object_iterate(&verify_oop_pointers);
2941 return true;
2942 }
2943 if (!r->is_continues_humongous()) {
2944 bool failures = false;
2945 r->verify(_vo, &failures);
2946 if (failures) {
2947 _failures = true;
2948 } else if (!r->is_starts_humongous()) {
2949 VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
2950         r->object_iterate(&not_dead_yet_cl);
2951 if (_vo != VerifyOption_G1UseNextMarking) {
2952 if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
2953 log_info(gc, verify)("[" PTR_FORMAT "," PTR_FORMAT "] max_live_bytes " SIZE_FORMAT " < calculated " SIZE_FORMAT,
2954 p2i(r->bottom()), p2i(r->end()), r->max_live_bytes(), not_dead_yet_cl.live_bytes());
2955 _failures = true;
2956 }
2957 } else {
2958 // When vo == UseNextMarking we cannot currently do a sanity
2959 // check on the live bytes as the calculation has not been
2960 // finalized yet.
2961 }
2962 }
2963 }
2964     return false; // continue the region iteration; any failure has already been recorded
2965 }
2966 };
2967
2968 // This is the task used for parallel verification of the heap regions
2969
2970 class G1ParVerifyTask: public AbstractGangTask {
2971 private:
2972 G1CollectedHeap* _g1h;
2973 VerifyOption _vo;
2974 bool _failures;
2982 AbstractGangTask("Parallel verify task"),
2983 _g1h(g1h),
2984 _vo(vo),
2985 _failures(false),
2986 _hrclaimer(g1h->workers()->active_workers()) {}
2987
2988 bool failures() {
2989 return _failures;
2990 }
2991
2992 void work(uint worker_id) {
2993 HandleMark hm;
2994 VerifyRegionClosure blk(true, _vo);
2995 _g1h->heap_region_par_iterate(&blk, worker_id, &_hrclaimer);
2996 if (blk.failures()) {
2997 _failures = true;
2998 }
2999 }
3000 };
3001
3002 void G1CollectedHeap::verify(VerifyOption vo) {
3003 if (!SafepointSynchronize::is_at_safepoint()) {
3004 log_info(gc, verify)("Skipping verification. Not at safepoint.");
3005 }
3006
3007 assert(Thread::current()->is_VM_thread(),
3008 "Expected to be executed serially by the VM thread at this point");
3009
3010 log_debug(gc, verify)("Roots");
3011 VerifyRootsClosure rootsCl(vo);
3012 VerifyKlassClosure klassCl(this, &rootsCl);
3013 CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);
3014
3015 // We apply the relevant closures to all the oops in the
3016 // system dictionary, class loader data graph, the string table
3017 // and the nmethods in the code cache.
3018 G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
3019 G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
3020
3021 {
3022 G1RootProcessor root_processor(this, 1);
3023 root_processor.process_all_roots(&rootsCl,
3024 &cldCl,
3025 &blobsCl);
3026 }
3027
3028 bool failures = rootsCl.failures() || codeRootsCl.failures();
3029
3030 if (vo != VerifyOption_G1UseMarkWord) {
3031 // If we're verifying during a full GC then the region sets
3032 // will have been torn down at the start of the GC. Therefore
3033 // verifying the region sets will fail. So we only verify
3034 // the region sets when not in a full GC.
3035 log_debug(gc, verify)("HeapRegionSets");
3036 verify_region_sets();
3037 }
3038
3039 log_debug(gc, verify)("HeapRegions");
3040 if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
3041
3042 G1ParVerifyTask task(this, vo);
3043 workers()->run_task(&task);
3044 if (task.failures()) {
3045 failures = true;
3046 }
3047
3048 } else {
3049 VerifyRegionClosure blk(false, vo);
3050 heap_region_iterate(&blk);
3051 if (blk.failures()) {
3052 failures = true;
3053 }
3054 }
3055
3056 if (G1StringDedup::is_enabled()) {
3057 log_debug(gc, verify)("StrDedup");
3058 G1StringDedup::verify();
3059 }
3060
3061 if (failures) {
3062 log_info(gc, verify)("Heap after failed verification:");
3063 // It helps to have the per-region information in the output to
3064 // help us track down what went wrong. This is why we call
3065 // print_extended_on() instead of print_on().
3066 LogHandle(gc, verify) log;
3067 ResourceMark rm;
3068 print_extended_on(log.info_stream());
3069 }
3070 guarantee(!failures, "there should not have been any failures");
3071 }
3072
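// Run heap verification if the given guard flag is set and total_collections()
// has reached VerifyGCStartAt; returns the verification time in milliseconds
// (0.0 when verification is skipped).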
3073 double G1CollectedHeap::verify(bool guard, const char* msg) {
3074 double verify_time_ms = 0.0;
3075
3076 if (guard && total_collections() >= VerifyGCStartAt) {
3077 double verify_start = os::elapsedTime();
3078 HandleMark hm; // Discard invalid handles created during verification
3079 prepare_for_verify();
3080 Universe::verify(VerifyOption_G1UsePrevMarking, msg);
3081 verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
3082 }
3083
3084 return verify_time_ms;
3085 }
3086
3087 void G1CollectedHeap::verify_before_gc() {
3088 double verify_time_ms = verify(VerifyBeforeGC, "Before GC");
3089 g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
3090 }
3091
3092 void G1CollectedHeap::verify_after_gc() {
3093 double verify_time_ms = verify(VerifyAfterGC, "After GC");
3094 g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
3095 }
3096
3097 class PrintRegionClosure: public HeapRegionClosure {
3098 outputStream* _st;
3099 public:
3100 PrintRegionClosure(outputStream* st) : _st(st) {}
3101 bool doHeapRegion(HeapRegion* r) {
3102 r->print_on(_st);
3103 return false;
3104 }
3105 };
3106
3107 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
3108 const HeapRegion* hr,
3109 const VerifyOption vo) const {
3110 switch (vo) {
3111 case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
3112 case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
3113 case VerifyOption_G1UseMarkWord: return !obj->is_gc_marked() && !hr->is_archive();
3183 G1StringDedup::print_worker_threads_on(st);
3184 }
3185 }
3186
3187 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
3188 workers()->threads_do(tc);
3189 tc->do_thread(_cmThread);
3190 _cg1r->threads_do(tc);
3191 if (G1StringDedup::is_enabled()) {
3192 G1StringDedup::threads_do(tc);
3193 }
3194 }
3195
3196 void G1CollectedHeap::print_tracing_info() const {
3197 // We'll overload this to mean "trace GC pause statistics."
3198 if (TraceYoungGenTime || TraceOldGenTime) {
3199 // The "G1CollectorPolicy" is keeping track of these stats, so delegate
3200 // to that.
3201 g1_policy()->print_tracing_info();
3202 }
3203 g1_rem_set()->print_summary_info();
3204 concurrent_mark()->print_summary_info();
3205 g1_policy()->print_yg_surv_rate_info();
3206 }
3207
3208 #ifndef PRODUCT
3209 // Helpful for debugging RSet issues.
3210
3211 class PrintRSetsClosure : public HeapRegionClosure {
3212 private:
3213 const char* _msg;
3214 size_t _occupied_sum;
3215
3216 public:
3217 bool doHeapRegion(HeapRegion* r) {
3218 HeapRegionRemSet* hrrs = r->rem_set();
3219 size_t occupied = hrrs->occupied();
3220 _occupied_sum += occupied;
3221
3222 tty->print_cr("Printing RSet for region " HR_FORMAT,
3223 HR_FORMAT_PARAMS(r));
3224 if (occupied == 0) {
3225 tty->print_cr(" RSet is empty");
3226 } else {
3227 hrrs->print();
3228 }
3229 tty->print_cr("----------");
3230 return false;
3231 }
3232
3233 PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
3234 tty->cr();
3235 tty->print_cr("========================================");
3236 tty->print_cr("%s", msg);
3237 tty->cr();
3238 }
3239
3240 ~PrintRSetsClosure() {
3241 tty->print_cr("Occupied Sum: " SIZE_FORMAT, _occupied_sum);
3242 tty->print_cr("========================================");
3243 tty->cr();
3244 }
3245 };
3246
3247 void G1CollectedHeap::print_cset_rsets() {
3248 PrintRSetsClosure cl("Printing CSet RSets");
3249 collection_set_iterate(&cl);
3250 }
3251
3252 void G1CollectedHeap::print_all_rsets() {
3253   PrintRSetsClosure cl("Printing All RSets");
3254 heap_region_iterate(&cl);
3255 }
3256 #endif // PRODUCT
3257
3258 G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
3259 YoungList* young_list = heap()->young_list();
3260
3261 size_t eden_used_bytes = young_list->eden_used_bytes();
3262 size_t survivor_used_bytes = young_list->survivor_used_bytes();
3263
3281
3282 const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
3283 gc_tracer->report_metaspace_summary(when, metaspace_summary);
3284 }
3285
3286
3287 G1CollectedHeap* G1CollectedHeap::heap() {
3288 CollectedHeap* heap = Universe::heap();
3289 assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
3290 assert(heap->kind() == CollectedHeap::G1CollectedHeap, "Not a G1CollectedHeap");
3291 return (G1CollectedHeap*)heap;
3292 }
3293
3294 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
3295 // always_do_update_barrier = false;
3296 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
3297   // Fill TLABs and such
3298 accumulate_statistics_all_tlabs();
3299 ensure_parsability(true);
3300
3301 g1_rem_set()->print_periodic_summary_info("Before GC RS summary", total_collections());
3302 }
3303
3304 void G1CollectedHeap::gc_epilogue(bool full) {
3305   // We are at the end of the GC. Total collections has already been incremented.
3306 g1_rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);
3307
3308 // FIXME: what is this about?
3309 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
3310 // is set.
3311 #if defined(COMPILER2) || INCLUDE_JVMCI
3312 assert(DerivedPointerTable::is_empty(), "derived pointer present");
3313 #endif
3314 // always_do_update_barrier = true;
3315
3316 resize_all_tlabs();
3317 allocation_context_stats().update(full);
3318
3319 // We have just completed a GC. Update the soft reference
3320 // policy with the new heap occupancy
3321 Universe::update_heap_info_at_gc();
3322 }
3323
3324 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
3325 uint gc_count_before,
3326 bool* succeeded,
3529 guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification");
3530
3531 // Here's a good place to add any other checks we'd like to
3532 // perform on CSet regions.
3533 return false;
3534 }
3535 };
3536 #endif // ASSERT
3537
3538 uint G1CollectedHeap::num_task_queues() const {
3539 return _task_queues->size();
3540 }
3541
3542 #if TASKQUEUE_STATS
3543 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
3544 st->print_raw_cr("GC Task Stats");
3545 st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
3546 st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
3547 }
3548
3549 void G1CollectedHeap::print_taskqueue_stats() const {
3550 LogHandle(gc, task, stats) log;
3551 if (!log.is_develop()) {
3552 return;
3553 }
3554 ResourceMark rm;
3555 outputStream* st = log.develop_stream();
3556
3557 print_taskqueue_stats_hdr(st);
3558
3559 TaskQueueStats totals;
3560 const uint n = num_task_queues();
3561 for (uint i = 0; i < n; ++i) {
3562 st->print("%3u ", i); task_queue(i)->stats.print(st); st->cr();
3563 totals += task_queue(i)->stats;
3564 }
3565 st->print_raw("tot "); totals.print(st); st->cr();
3566
3567 DEBUG_ONLY(totals.verify());
3568 }
3569
3570 void G1CollectedHeap::reset_taskqueue_stats() {
3571 const uint n = num_task_queues();
3572 for (uint i = 0; i < n; ++i) {
3573 task_queue(i)->stats.reset();
3574 }
3575 }
3576 #endif // TASKQUEUE_STATS
3577
3578 void G1CollectedHeap::log_gc_footer(double pause_time_counter) {
3579 if (evacuation_failed()) {
3580 log_info(gc)("To-space exhausted");
3581 }
3582
3583 double pause_time_sec = TimeHelper::counter_to_seconds(pause_time_counter);
3584 g1_policy()->print_phases(pause_time_sec);
3585
3586 g1_policy()->print_detailed_heap_transition();
3587 }
3588
3589
3590 void G1CollectedHeap::wait_for_root_region_scanning() {
3591 double scan_wait_start = os::elapsedTime();
3592 // We have to wait until the CM threads finish scanning the
3593 // root regions as it's the only way to ensure that all the
3594 // objects on them have been correctly scanned before we start
3595 // moving them during the GC.
3596 bool waited = _cm->root_regions()->wait_until_scan_finished();
3597 double wait_time_ms = 0.0;
3598 if (waited) {
3599 double scan_wait_end = os::elapsedTime();
3600 wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
3601 }
3602 g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
3603 }
3604
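// Perform an evacuation pause (young, mixed, or initial-mark) at a safepoint.
// Returns false if the pause could not be started, e.g. because the GC locker
// is active.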
3605 bool
3606 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3607 assert_at_safepoint(true /* should_be_vm_thread */);
3608 guarantee(!is_gc_active(), "collection is not reentrant");
3609
3610 if (GC_locker::check_active_before_gc()) {
3611 return false;
3612 }
3613
3614 _gc_timer_stw->register_gc_start();
3615
3616 GCIdMark gc_id_mark;
3617 _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3618
3619 SvcGCMarker sgcm(SvcGCMarker::MINOR);
3620 ResourceMark rm;
3621
3622 wait_for_root_region_scanning();
3623
3624 print_heap_before_gc();
3625 trace_heap_before_gc(_gc_tracer_stw);
3626
3627 verify_region_sets_optional();
3628 verify_dirty_young_regions();
3629
3630 // This call will decide whether this pause is an initial-mark
3631 // pause. If it is, during_initial_mark_pause() will return true
3632 // for the duration of this pause.
3633 g1_policy()->decide_on_conc_mark_initiation();
3634
3635 // We do not allow initial-mark to be piggy-backed on a mixed GC.
3636 assert(!collector_state()->during_initial_mark_pause() ||
3637 collector_state()->gcs_are_young(), "sanity");
3638
3639 // We also do not allow mixed GCs during marking.
3640 assert(!collector_state()->mark_in_progress() || collector_state()->gcs_are_young(), "sanity");
3641
3642 // Record whether this pause is an initial mark. When the current
3643 // thread has completed its logging output and it's safe to signal
3644 // the CM thread, the flag's value in the policy has been reset.
3645 bool should_start_conc_mark = collector_state()->during_initial_mark_pause();
3646
3647 // Inner scope for scope based logging, timers, and stats collection
3648 {
3649 EvacuationInfo evacuation_info;
3650
3651 if (collector_state()->during_initial_mark_pause()) {
3652 // We are about to start a marking cycle, so we increment the
3653 // full collection counter.
3654 increment_old_marking_cycles_started();
3655 register_concurrent_cycle_start(_gc_timer_stw->gc_start());
3656 }
3657
3658 _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
3659
3660 GCTraceCPUTime tcpu;
3661
3662 uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
3663 workers()->active_workers(),
3664 Threads::number_of_non_daemon_threads());
3665 workers()->set_active_workers(active_workers);
3666 FormatBuffer<> gc_string("Pause ");
3667 if (collector_state()->during_initial_mark_pause()) {
3668 gc_string.append("Initial Mark");
3669 } else if (collector_state()->gcs_are_young()) {
3670 gc_string.append("Young");
3671 } else {
3672 gc_string.append("Mixed");
3673 }
3674 GCTraceTime(Info, gc) tm5(gc_string, NULL, gc_cause(), true);
3675
3676
3677 double pause_start_sec = os::elapsedTime();
3678 double pause_start_counter = os::elapsed_counter();
3679 g1_policy()->note_gc_start(active_workers);
3680
3681 TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3682 TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3683
3684 // If the secondary_free_list is not empty, append it to the
3685 // free_list. No need to wait for the cleanup operation to finish;
3686 // the region allocation code will check the secondary_free_list
3687 // and wait if necessary. If the G1StressConcRegionFreeing flag is
3688 // set, skip this step so that the region allocation code has to
3689 // get entries from the secondary_free_list.
3690 if (!G1StressConcRegionFreeing) {
3691 append_secondary_free_list_if_not_empty_with_lock();
3692 }
3693
3694 assert(check_young_list_well_formed(), "young list should be well formed");
3695
3696 // Don't dynamically change the number of GC threads this early. A value of
3697 // 0 is used to indicate serial work. When parallel work is done,
3698 // it will be set.
3699
3713 #endif
3714
3715 // Please see comment in g1CollectedHeap.hpp and
3716 // G1CollectedHeap::ref_processing_init() to see how
3717 // reference processing currently works in G1.
3718
3719 // Enable discovery in the STW reference processor
3720 ref_processor_stw()->enable_discovery();
3721
3722 {
3723 // We want to temporarily turn off discovery by the
3724      // CM ref processor, if necessary, and turn it back
3725 // on again later if we do. Using a scoped
3726 // NoRefDiscovery object will do this.
3727 NoRefDiscovery no_cm_discovery(ref_processor_cm());
3728
3729 // Forget the current alloc region (we might even choose it to be part
3730 // of the collection set!).
3731 _allocator->release_mutator_alloc_region();
3732
3733 // This timing is only used by the ergonomics to handle our pause target.
3734 // It is unclear why this should not include the full pause. We will
3735 // investigate this in CR 7178365.
3736 //
3737 // Preserving the old comment here if that helps the investigation:
3738 //
3739 // The elapsed time induced by the start time below deliberately elides
3740 // the possible verification above.
3741 double sample_start_time_sec = os::elapsedTime();
3742
3743 g1_policy()->record_collection_pause_start(sample_start_time_sec);
3744
3745 if (collector_state()->during_initial_mark_pause()) {
3746 concurrent_mark()->checkpointRootsInitialPre();
3747 }
3748
3749 double time_remaining_ms = g1_policy()->finalize_young_cset_part(target_pause_time_ms);
3750 g1_policy()->finalize_old_cset_part(time_remaining_ms);
3751
3752 evacuation_info.set_collectionset_regions(g1_policy()->cset_region_length());
3829
3830 if (collector_state()->during_initial_mark_pause()) {
3831 // We have to do this before we notify the CM threads that
3832 // they can start working to make sure that all the
3833 // appropriate initialization is done on the CM object.
3834 concurrent_mark()->checkpointRootsInitialPost();
3835 collector_state()->set_mark_in_progress(true);
3836 // Note that we don't actually trigger the CM thread at
3837 // this point. We do that later when we're sure that
3838 // the current thread has completed its logging output.
3839 }
3840
3841 allocate_dummy_regions();
3842
3843 _allocator->init_mutator_alloc_region();
3844
3845 {
3846 size_t expand_bytes = g1_policy()->expansion_amount();
3847 if (expand_bytes > 0) {
3848 size_t bytes_before = capacity();
3849        // No need for ergo logging here,
3850 // expansion_amount() does this when it returns a value > 0.
3851 double expand_ms;
3852 if (!expand(expand_bytes, &expand_ms)) {
3853 // We failed to expand the heap. Cannot do anything about it.
3854 }
3855 g1_policy()->phase_times()->record_expand_heap_time(expand_ms);
3856 }
3857 }
3858
3859      // We redo the verification, but now with respect to the new CSet, which
3860      // has just been initialized after the previous CSet was freed.
3861 _cm->verify_no_cset_oops();
3862 _cm->note_end_of_gc();
3863
3864 // This timing is only used by the ergonomics to handle our pause target.
3865 // It is unclear why this should not include the full pause. We will
3866 // investigate this in CR 7178365.
3867 double sample_end_time_sec = os::elapsedTime();
3868 double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
3869 size_t total_cards_scanned = per_thread_states.total_cards_scanned();
3889 // stamp here we invalidate all the GC time stamps on all the
3890 // regions and saved_mark_word() will simply return top() for
3891 // all the regions. This is a nicer way of ensuring this rather
3892 // than iterating over the regions and fixing them. In fact, the
3893 // GC time stamp increment here also ensures that
3894 // saved_mark_word() will return top() between pauses, i.e.,
3895 // during concurrent refinement. So we don't need the
3896      // is_gc_active() check to decide which top to use when
3897 // scanning cards (see CR 7039627).
3898 increment_gc_time_stamp();
3899
3900 verify_after_gc();
3901 check_bitmaps("GC End");
3902
3903 assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
3904 ref_processor_stw()->verify_no_references_recorded();
3905
3906 // CM reference discovery will be re-enabled if necessary.
3907 }
3908
3909 #ifdef TRACESPINNING
3910 ParallelTaskTerminator::print_termination_counts();
3911 #endif
3912
3913 gc_epilogue(false);
3914 }
3915
3916 // Print the remainder of the GC log output.
3917 log_gc_footer(os::elapsed_counter() - pause_start_counter);
3918
3919    // It is not yet safe to tell the concurrent mark to
3920 // start as we have some optional output below. We don't want the
3921 // output from the concurrent mark thread interfering with this
3922 // logging output either.
3923
3924 _hrm.verify_optional();
3925 verify_region_sets_optional();
3926
3927 TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
3928 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
3929
3930 print_heap_after_gc();
3931 trace_heap_after_gc(_gc_tracer_stw);
3932
3933 // We must call G1MonitoringSupport::update_sizes() in the same scoping level
3934 // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
3935 // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
3936 // before any GC notifications are raised.
3937 g1mm()->update_sizes();
3938
3939 _gc_tracer_stw->report_evacuation_info(&evacuation_info);
3940 _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
3941 _gc_timer_stw->register_gc_end();
3942 _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
3943 }
3944 // It should now be safe to tell the concurrent mark thread to start
3945 // without its logging output interfering with the logging output
3946 // that came from the pause.
3947
4098
4099 double strong_roots_sec = os::elapsedTime() - start_strong_roots_sec;
4100
4101 double term_sec = 0.0;
4102 size_t evac_term_attempts = 0;
4103 {
4104 double start = os::elapsedTime();
4105 G1ParEvacuateFollowersClosure evac(_g1h, pss, _queues, &_terminator);
4106 evac.do_void();
4107
4108 evac_term_attempts = evac.term_attempts();
4109 term_sec = evac.term_time();
4110 double elapsed_sec = os::elapsedTime() - start;
4111 _g1h->g1_policy()->phase_times()->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
4112 _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
4113 _g1h->g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, evac_term_attempts);
4114 }
4115
4116 assert(pss->queue_is_empty(), "should be empty");
4117
4118 if (log_is_enabled(Debug, gc, task, stats)) {
4119 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
4120 size_t lab_waste;
4121 size_t lab_undo_waste;
4122 pss->waste(lab_waste, lab_undo_waste);
4123 _g1h->print_termination_stats(worker_id,
4124 (os::elapsedTime() - start_sec) * 1000.0, /* elapsed time */
4125 strong_roots_sec * 1000.0, /* strong roots time */
4126 term_sec * 1000.0, /* evac term time */
4127 evac_term_attempts, /* evac term attempts */
4128 lab_waste, /* alloc buffer waste */
4129 lab_undo_waste /* undo waste */
4130 );
4131 }
4132
4133 // Close the inner scope so that the ResourceMark and HandleMark
4134 // destructors are executed here and are included as part of the
4135 // "GC Worker Time".
4136 }
4137 _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime());
4138 }
4139 };
4140
4141 void G1CollectedHeap::print_termination_stats_hdr() {
4142 LogHandle(gc, task, stats) log;
4143 if (!log.is_debug()) {
4144 return;
4145 }
4146 log.debug("GC Termination Stats");
4147 log.debug(" elapsed --strong roots-- -------termination------- ------waste (KiB)------");
4148 log.debug("thr ms ms %% ms %% attempts total alloc undo");
4149 log.debug("--- --------- --------- ------ --------- ------ -------- ------- ------- -------");
4150 }
4151
4152 void G1CollectedHeap::print_termination_stats(uint worker_id,
4153 double elapsed_ms,
4154 double strong_roots_ms,
4155 double term_ms,
4156 size_t term_attempts,
4157 size_t alloc_buffer_waste,
4158 size_t undo_waste) const {
4159 log_debug(gc, task, stats)
4160 ("%3d %9.2f %9.2f %6.2f "
4161 "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
4162 SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
4163 worker_id, elapsed_ms, strong_roots_ms, strong_roots_ms * 100 / elapsed_ms,
4164 term_ms, term_ms * 100 / elapsed_ms, term_attempts,
4165 (alloc_buffer_waste + undo_waste) * HeapWordSize / K,
4166 alloc_buffer_waste * HeapWordSize / K,
4167 undo_waste * HeapWordSize / K);
4168 }
4169
4170 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
4171 private:
4172 BoolObjectClosure* _is_alive;
4173 int _initial_string_table_size;
4174 int _initial_symbol_table_size;
4175
4176 bool _process_strings;
4177 int _strings_processed;
4178 int _strings_removed;
4179
4180 bool _process_symbols;
4189 _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
4190
4191 _initial_string_table_size = StringTable::the_table()->table_size();
4192 _initial_symbol_table_size = SymbolTable::the_table()->table_size();
4193 if (process_strings) {
4194 StringTable::clear_parallel_claimed_index();
4195 }
4196 if (process_symbols) {
4197 SymbolTable::clear_parallel_claimed_index();
4198 }
4199 }
4200
4201 ~G1StringSymbolTableUnlinkTask() {
4202 guarantee(!_process_strings || StringTable::parallel_claimed_index() >= _initial_string_table_size,
4203 "claim value %d after unlink less than initial string table size %d",
4204 StringTable::parallel_claimed_index(), _initial_string_table_size);
4205 guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
4206 "claim value %d after unlink less than initial symbol table size %d",
4207 SymbolTable::parallel_claimed_index(), _initial_symbol_table_size);
4208
4209 log_trace(gc, stringdedup)("Cleaned string and symbol table, "
4210 "strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, "
4211 "symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",
4212 strings_processed(), strings_removed(),
4213 symbols_processed(), symbols_removed());
4214 }
4215
4216 void work(uint worker_id) {
4217 int strings_processed = 0;
4218 int strings_removed = 0;
4219 int symbols_processed = 0;
4220 int symbols_removed = 0;
4221 if (_process_strings) {
4222 StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
4223 Atomic::add(strings_processed, &_strings_processed);
4224 Atomic::add(strings_removed, &_strings_removed);
4225 }
4226 if (_process_symbols) {
4227 SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
4228 Atomic::add(symbols_processed, &_symbols_processed);
4229 Atomic::add(symbols_removed, &_symbols_removed);
4230 }
4231 }
4232
4233 size_t strings_processed() const { return (size_t)_strings_processed; }
4234 size_t strings_removed() const { return (size_t)_strings_removed; }
5021
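// Evacuate the live objects in the current collection set by running the
// parallel G1ParTask across all active workers, recording the parallel time
// and the subsequent code root fixup time.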
5022 void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
5023 g1_rem_set()->prepare_for_oops_into_collection_set_do();
5024
5025 // Should G1EvacuationFailureALot be in effect for this GC?
5026 NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
5027
5028 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5029 double start_par_time_sec = os::elapsedTime();
5030 double end_par_time_sec;
5031
5032 {
5033 const uint n_workers = workers()->active_workers();
5034 G1RootProcessor root_processor(this, n_workers);
5035 G1ParTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, n_workers);
5036 // InitialMark needs claim bits to keep track of the marked-through CLDs.
5037 if (collector_state()->during_initial_mark_pause()) {
5038 ClassLoaderDataGraph::clear_claimed_marks();
5039 }
5040
5041 print_termination_stats_hdr();
5042
5043 workers()->run_task(&g1_par_task);
5044 end_par_time_sec = os::elapsedTime();
5045
5046 // Closing the inner scope will execute the destructor
5047 // for the G1RootProcessor object. We record the current
5048 // elapsed time before closing the scope so that time
5049 // taken for the destructor is NOT included in the
5050 // reported parallel time.
5051 }
5052
5053 G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
5054
5055 double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
5056 phase_times->record_par_time(par_time_ms);
5057
5058 double code_root_fixup_time_ms =
5059 (os::elapsedTime() - end_par_time_sec) * 1000.0;
5060 phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
5061
5246 }
5247 }
5248
5249 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
5250 G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
5251 for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
5252 verify_dirty_region(hr);
5253 }
5254 }
5255
5256 void G1CollectedHeap::verify_dirty_young_regions() {
5257 verify_dirty_young_list(_young_list->first_region());
5258 }
5259
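// Check that the given bitmap has no marks in the range [tams, end); log and
// return false if a stray mark bit is found.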
5260 bool G1CollectedHeap::verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
5261 HeapWord* tams, HeapWord* end) {
5262 guarantee(tams <= end,
5263 "tams: " PTR_FORMAT " end: " PTR_FORMAT, p2i(tams), p2i(end));
5264 HeapWord* result = bitmap->getNextMarkedWordAddress(tams, end);
5265 if (result < end) {
5266 log_info(gc, verify)("## wrong marked address on %s bitmap: " PTR_FORMAT, bitmap_name, p2i(result));
5267 log_info(gc, verify)("## %s tams: " PTR_FORMAT " end: " PTR_FORMAT, bitmap_name, p2i(tams), p2i(end));
5268 return false;
5269 }
5270 return true;
5271 }
5272
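// Verify that the prev and (when it is safe to inspect it) next mark bitmaps
// have no marks above the region's corresponding top-at-mark-start (TAMS).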
5273 bool G1CollectedHeap::verify_bitmaps(const char* caller, HeapRegion* hr) {
5274 CMBitMapRO* prev_bitmap = concurrent_mark()->prevMarkBitMap();
5275 CMBitMapRO* next_bitmap = (CMBitMapRO*) concurrent_mark()->nextMarkBitMap();
5276
5277 HeapWord* bottom = hr->bottom();
5278 HeapWord* ptams = hr->prev_top_at_mark_start();
5279 HeapWord* ntams = hr->next_top_at_mark_start();
5280 HeapWord* end = hr->end();
5281
5282 bool res_p = verify_no_bits_over_tams("prev", prev_bitmap, ptams, end);
5283
5284 bool res_n = true;
5285 // We reset mark_in_progress() before we reset _cmThread->in_progress() and in this window
5286  // we do the clearing of the next bitmap concurrently. Thus, we cannot verify the bitmap
5287 // if we happen to be in that state.
5288 if (collector_state()->mark_in_progress() || !_cmThread->in_progress()) {
5289 res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
5290 }
5291 if (!res_p || !res_n) {
5292 log_info(gc, verify)("#### Bitmap verification failed for " HR_FORMAT, HR_FORMAT_PARAMS(hr));
5293 log_info(gc, verify)("#### Caller: %s", caller);
5294 return false;
5295 }
5296 return true;
5297 }
5298
5299 void G1CollectedHeap::check_bitmaps(const char* caller, HeapRegion* hr) {
5300 if (!G1VerifyBitmaps) return;
5301
5302 guarantee(verify_bitmaps(caller, hr), "bitmap verification");
5303 }
5304
5305 class G1VerifyBitmapClosure : public HeapRegionClosure {
5306 private:
5307 const char* _caller;
5308 G1CollectedHeap* _g1h;
5309 bool _failures;
5310
5311 public:
5312 G1VerifyBitmapClosure(const char* caller, G1CollectedHeap* g1h) :
5313 _caller(caller), _g1h(g1h), _failures(false) { }
5325
5326 void G1CollectedHeap::check_bitmaps(const char* caller) {
5327 if (!G1VerifyBitmaps) return;
5328
5329 G1VerifyBitmapClosure cl(caller, this);
5330 heap_region_iterate(&cl);
5331 guarantee(!cl.failures(), "bitmap verification");
5332 }
5333
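// Closure that cross-checks every region's collection set membership and type
// against the corresponding entry in the in-cset fast-test table.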
5334 class G1CheckCSetFastTableClosure : public HeapRegionClosure {
5335 private:
5336 bool _failures;
5337 public:
5338 G1CheckCSetFastTableClosure() : HeapRegionClosure(), _failures(false) { }
5339
5340 virtual bool doHeapRegion(HeapRegion* hr) {
5341 uint i = hr->hrm_index();
5342 InCSetState cset_state = (InCSetState) G1CollectedHeap::heap()->_in_cset_fast_test.get_by_index(i);
5343 if (hr->is_humongous()) {
5344 if (hr->in_collection_set()) {
5345 log_info(gc, verify)("\n## humongous region %u in CSet", i);
5346 _failures = true;
5347 return true;
5348 }
5349 if (cset_state.is_in_cset()) {
5350 log_info(gc, verify)("\n## inconsistent cset state %d for humongous region %u", cset_state.value(), i);
5351 _failures = true;
5352 return true;
5353 }
5354 if (hr->is_continues_humongous() && cset_state.is_humongous()) {
5355 log_info(gc, verify)("\n## inconsistent cset state %d for continues humongous region %u", cset_state.value(), i);
5356 _failures = true;
5357 return true;
5358 }
5359 } else {
5360 if (cset_state.is_humongous()) {
5361 log_info(gc, verify)("\n## inconsistent cset state %d for non-humongous region %u", cset_state.value(), i);
5362 _failures = true;
5363 return true;
5364 }
5365 if (hr->in_collection_set() != cset_state.is_in_cset()) {
5366 log_info(gc, verify)("\n## in CSet %d / cset state %d inconsistency for region %u",
5367 hr->in_collection_set(), cset_state.value(), i);
5368 _failures = true;
5369 return true;
5370 }
5371 if (cset_state.is_in_cset()) {
5372 if (hr->is_young() != (cset_state.is_young())) {
5373 log_info(gc, verify)("\n## is_young %d / cset state %d inconsistency for region %u",
5374 hr->is_young(), cset_state.value(), i);
5375 _failures = true;
5376 return true;
5377 }
5378 if (hr->is_old() != (cset_state.is_old())) {
5379 log_info(gc, verify)("\n## is_old %d / cset state %d inconsistency for region %u",
5380 hr->is_old(), cset_state.value(), i);
5381 _failures = true;
5382 return true;
5383 }
5384 }
5385 }
5386 return false;
5387 }
5388
5389 bool failures() const { return _failures; }
5390 };
5391
5392 bool G1CollectedHeap::check_cset_fast_test() {
5393 G1CheckCSetFastTableClosure cl;
5394 _hrm.iterate(&cl);
5395 return !cl.failures();
5396 }
5397 #endif // PRODUCT
5398
5399 void G1CollectedHeap::cleanUpCardTable() {
5569 // (i.e. it has "escaped" to an old object) this remembered set entry will stay
5570 // until the end of a concurrent mark.
5571 //
5572 // It is not required to check whether the object has been found dead by marking
5573  // or not; in fact, doing so would prevent reclamation within a concurrent cycle, as
5574 // all objects allocated during that time are considered live.
5575 // SATB marking is even more conservative than the remembered set.
5576 // So if at this point in the collection there is no remembered set entry,
5577 // nobody has a reference to it.
5578 // At the start of collection we flush all refinement logs, and remembered sets
5579  // are completely up-to-date with respect to references to the humongous object.
5580 //
5581 // Other implementation considerations:
5582  // - never consider object arrays at this time because they would require
5583  //   considerable effort to clean up the remembered sets. This is
5584 // required because stale remembered sets might reference locations that
5585 // are currently allocated into.
5586 uint region_idx = r->hrm_index();
5587 if (!g1h->is_humongous_reclaim_candidate(region_idx) ||
5588 !r->rem_set()->is_empty()) {
5589 log_trace(gc, humongous)("Live humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
5590 region_idx,
5591 (size_t)obj->size() * HeapWordSize,
5592 p2i(r->bottom()),
5593 r->rem_set()->occupied(),
5594 r->rem_set()->strong_code_roots_list_length(),
5595 next_bitmap->isMarked(r->bottom()),
5596 g1h->is_humongous_reclaim_candidate(region_idx),
5597 obj->is_typeArray()
5598 );
5599 return false;
5600 }
5601
5602 guarantee(obj->is_typeArray(),
5603 "Only eagerly reclaiming type arrays is supported, but the object "
5604 PTR_FORMAT " is not.", p2i(r->bottom()));
5605
5606 log_trace(gc, humongous)("Dead humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
5607 region_idx,
5608 (size_t)obj->size() * HeapWordSize,
5609 p2i(r->bottom()),
5610 r->rem_set()->occupied(),
5611 r->rem_set()->strong_code_roots_list_length(),
5612 next_bitmap->isMarked(r->bottom()),
5613 g1h->is_humongous_reclaim_candidate(region_idx),
5614 obj->is_typeArray()
5615 );
5616
5617 // Need to clear mark bit of the humongous object if already set.
5618 if (next_bitmap->isMarked(r->bottom())) {
5619 next_bitmap->clear(r->bottom());
5620 }
5621 do {
5622 HeapRegion* next = g1h->next_region_in_humongous(r);
5623 _freed_bytes += r->used();
5624 r->set_containing_set(NULL);
5625 _humongous_regions_removed.increment(1u, r->capacity());
5626 g1h->free_humongous_region(r, _free_region_list, false);
5627 r = next;
5628 } while (r != NULL);
5629
5630 return false;
5631 }
5632
5633 HeapRegionSetCount& humongous_free_count() {
5634 return _humongous_regions_removed;
5635 }
5636
5637 size_t bytes_freed() const {
5638 return _freed_bytes;
5639 }
5640
5641 size_t humongous_reclaimed() const {
5642 return _humongous_regions_removed.length();
5643 }
5644 };
5645
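// Eagerly reclaim humongous regions whose objects are provably dead: reclaim
// candidates (currently only primitive type arrays) with empty remembered sets.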
5646 void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
5647 assert_at_safepoint(true);
5648
5649 if (!G1EagerReclaimHumongousObjects ||
5650 (!_has_humongous_reclaim_candidates && !log_is_enabled(Trace, gc, humongous))) {
5651 g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);
5652 return;
5653 }
5654
5655 double start_time = os::elapsedTime();
5656
5657 FreeRegionList local_cleanup_list("Local Humongous Cleanup List");
5658
5659 G1FreeHumongousRegionClosure cl(&local_cleanup_list);
5660 heap_region_iterate(&cl);
5661
5662 HeapRegionSetCount empty_set;
5663 remove_from_old_sets(empty_set, cl.humongous_free_count());
5664
5665 G1HRPrinter* hrp = hr_printer();
5666 if (hrp->is_active()) {
5667 FreeRegionListIterator iter(&local_cleanup_list);
5668 while (iter.more_available()) {
5669 HeapRegion* hr = iter.get_next();
5670 hrp->cleanup(hr);
5683  // the current incremental collection set in preparation for a
5684 // full collection. After the full GC we will start to build up
5685 // the incremental collection set again.
5686 // This is only called when we're doing a full collection
5687 // and is immediately followed by the tearing down of the young list.
5688
5689 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
5690 HeapRegion* cur = cs_head;
5691
5692 while (cur != NULL) {
5693 HeapRegion* next = cur->next_in_collection_set();
5694 assert(cur->in_collection_set(), "bad CS");
5695 cur->set_next_in_collection_set(NULL);
5696 clear_in_cset(cur);
5697 cur->set_young_index_in_cset(-1);
5698 cur = next;
5699 }
5700 }
5701
5702 void G1CollectedHeap::set_free_regions_coming() {
5703 log_develop(gc, freelist)("G1ConcRegionFreeing [cm thread] : "
5704 "setting free regions coming");
5705
5706 assert(!free_regions_coming(), "pre-condition");
5707 _free_regions_coming = true;
5708 }
5709
5710 void G1CollectedHeap::reset_free_regions_coming() {
5711 assert(free_regions_coming(), "pre-condition");
5712
5713 {
5714 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
5715 _free_regions_coming = false;
5716 SecondaryFreeList_lock->notify_all();
5717 }
5718
5719 log_develop(gc, freelist)("G1ConcRegionFreeing [cm thread] : "
5720 "reset free regions coming");
5721 }
5722
5723 void G1CollectedHeap::wait_while_free_regions_coming() {
5724 // Most of the time we won't have to wait, so let's do a quick test
5725 // first before we take the lock.
5726 if (!free_regions_coming()) {
5727 return;
5728 }
5729
5730 log_develop(gc, freelist)("G1ConcRegionFreeing [other] : "
5731 "waiting for free regions");
5732
5733 {
5734 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
5735 while (free_regions_coming()) {
5736 SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
5737 }
5738 }
5739
5740 log_develop(gc, freelist)("G1ConcRegionFreeing [other] : "
5741 "done waiting for free regions");
5742 }
5743
5744 bool G1CollectedHeap::is_old_gc_alloc_region(HeapRegion* hr) {
5745 return _allocator->is_retained_old_region(hr);
5746 }
5747
5748 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
5749 _young_list->push_region(hr);
5750 }
5751
5752 class NoYoungRegionsClosure: public HeapRegionClosure {
5753 private:
5754 bool _success;
5755 public:
5756 NoYoungRegionsClosure() : _success(true) { }
5757 bool doHeapRegion(HeapRegion* r) {
5758 if (r->is_young()) {
5759 log_info(gc, verify)("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
5760 p2i(r->bottom()), p2i(r->end()));
5761 _success = false;
5762 }
5763 return false;
5764 }
5765 bool success() { return _success; }
5766 };
5767
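// Returns true if the young list is empty; when check_heap is set, also verify
// that no region anywhere in the heap is still tagged as young.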
5768 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
5769 bool ret = _young_list->check_list_empty(check_sample);
5770
5771 if (check_heap) {
5772 NoYoungRegionsClosure closure;
5773 heap_region_iterate(&closure);
5774 ret = ret && closure.success();
5775 }
5776
5777 return ret;
5778 }
5779
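// Retire a GC allocation region: account for the bytes copied into it and add
// it to the survivor young list or to the old region set, depending on dest.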
5990 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
5991 size_t allocated_bytes,
5992 InCSetState dest) {
5993 bool during_im = collector_state()->during_initial_mark_pause();
5994 alloc_region->note_end_of_copying(during_im);
5995 g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
5996 if (dest.is_young()) {
5997 young_list()->add_survivor_region(alloc_region);
5998 } else {
5999 _old_set.add(alloc_region);
6000 }
6001 _hr_printer.retire(alloc_region);
6002 }
6003
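// Allocate the free region with the highest index, expanding the heap if that
// is required to make one available. Returns NULL if no region could be obtained.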
6004 HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
6005 bool expanded = false;
6006 uint index = _hrm.find_highest_free(&expanded);
6007
6008 if (index != G1_NO_HRM_INDEX) {
6009 if (expanded) {
6010 log_debug(gc, ergo, heap)("Attempt heap expansion (requested address range outside heap bounds). region size: " SIZE_FORMAT "B",
6011 HeapRegion::GrainWords * HeapWordSize);
6012 }
6013 _hrm.allocate_free_regions_starting_at(index, 1);
6014 return region_at(index);
6015 }
6016 return NULL;
6017 }
6018
6019 // Heap region set verification
6020
6021 class VerifyRegionListsClosure : public HeapRegionClosure {
6022 private:
6023 HeapRegionSet* _old_set;
6024 HeapRegionSet* _humongous_set;
6025 HeapRegionManager* _hrm;
6026
6027 public:
6028 HeapRegionSetCount _old_count;
6029 HeapRegionSetCount _humongous_count;
6030 HeapRegionSetCount _free_count;
|