18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/metadataOnStackMark.hpp"
27 #include "classfile/stringTable.hpp"
28 #include "code/codeCache.hpp"
29 #include "code/icBuffer.hpp"
30 #include "gc/g1/bufferingOopClosure.hpp"
31 #include "gc/g1/concurrentG1Refine.hpp"
32 #include "gc/g1/concurrentG1RefineThread.hpp"
33 #include "gc/g1/concurrentMarkThread.inline.hpp"
34 #include "gc/g1/g1Allocator.inline.hpp"
35 #include "gc/g1/g1CollectedHeap.inline.hpp"
36 #include "gc/g1/g1CollectorPolicy.hpp"
37 #include "gc/g1/g1CollectorState.hpp"
38 #include "gc/g1/g1ErgoVerbose.hpp"
39 #include "gc/g1/g1EvacFailure.hpp"
40 #include "gc/g1/g1GCPhaseTimes.hpp"
41 #include "gc/g1/g1Log.hpp"
42 #include "gc/g1/g1MarkSweep.hpp"
43 #include "gc/g1/g1OopClosures.inline.hpp"
44 #include "gc/g1/g1ParScanThreadState.inline.hpp"
45 #include "gc/g1/g1RegionToSpaceMapper.hpp"
46 #include "gc/g1/g1RemSet.inline.hpp"
47 #include "gc/g1/g1RootClosures.hpp"
48 #include "gc/g1/g1RootProcessor.hpp"
49 #include "gc/g1/g1StringDedup.hpp"
50 #include "gc/g1/g1YCTypes.hpp"
51 #include "gc/g1/heapRegion.inline.hpp"
52 #include "gc/g1/heapRegionRemSet.hpp"
53 #include "gc/g1/heapRegionSet.inline.hpp"
54 #include "gc/g1/suspendibleThreadSet.hpp"
55 #include "gc/g1/vm_operations_g1.hpp"
56 #include "gc/shared/gcHeapSummary.hpp"
57 #include "gc/shared/gcId.hpp"
58 #include "gc/shared/gcLocker.inline.hpp"
59 #include "gc/shared/gcTimer.hpp"
60 #include "gc/shared/gcTrace.hpp"
61 #include "gc/shared/gcTraceTime.hpp"
62 #include "gc/shared/generationSpec.hpp"
63 #include "gc/shared/isGCActiveMark.hpp"
64 #include "gc/shared/referenceProcessor.hpp"
65 #include "gc/shared/taskqueue.inline.hpp"
66 #include "memory/allocation.hpp"
67 #include "memory/iterator.hpp"
68 #include "oops/oop.inline.hpp"
69 #include "runtime/atomic.inline.hpp"
70 #include "runtime/init.hpp"
71 #include "runtime/orderAccess.inline.hpp"
72 #include "runtime/vmThread.hpp"
73 #include "utilities/globalDefinitions.hpp"
74 #include "utilities/stack.inline.hpp"
75
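// Threshold, in words, above which an object is treated as humongous. Left at
// 0 here; it is computed during heap initialization once the region size is
// known (an object of at least half a heap region is humongous).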
76 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
77
78 // INVARIANTS/NOTES
79 //
80 // All allocation activity covered by the G1CollectedHeap interface is
81 * serialized by acquiring the Heap_lock. This happens in mem_allocate
82 // and allocate_new_tlab, which are the "entry" points to the
83 // allocation code from the rest of the JVM. (Note that this does not
84 // apply to TLAB allocation, which is not part of this interface: it
85 // is done by clients of this interface.)
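// As a rough illustration (a sketch only, not the actual implementation), the
// serialization described above amounts to the entry points doing something
// like:
//
//   HeapWord* G1CollectedHeap::mem_allocate(size_t word_size, ...) {
//     MutexLockerEx ml(Heap_lock);  // serialize with other allocation entry points
//     ...                           // try the current alloc region, else take the slow path
//   }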
187 } while (hr != head);
188 assert(hr != NULL, "invariant");
189 hr->set_next_dirty_cards_region(NULL);
190 return hr;
191 }
192
193 // Returns true if the reference points to an object that
194 // can move in an incremental collection.
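// Pinned regions (e.g. humongous and archive regions) are never moved by an
// incremental collection, so references into them are not scavengable.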
195 bool G1CollectedHeap::is_scavengable(const void* p) {
196 HeapRegion* hr = heap_region_containing(p);
197 return !hr->is_pinned();
198 }
199
200 // Private methods.
201
202 HeapRegion*
203 G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
204 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
205 while (!_secondary_free_list.is_empty() || free_regions_coming()) {
206 if (!_secondary_free_list.is_empty()) {
207 if (G1ConcRegionFreeingVerbose) {
208 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
209 "secondary_free_list has %u entries",
210 _secondary_free_list.length());
211 }
212 // It looks as if there are free regions available on the
213 // secondary_free_list. Let's move them to the free_list and try
214 // again to allocate from it.
215 append_secondary_free_list();
216
217 assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not "
218 "empty we should have moved at least one entry to the free_list");
219 HeapRegion* res = _hrm.allocate_free_region(is_old);
220 if (G1ConcRegionFreeingVerbose) {
221 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
222 "allocated " HR_FORMAT " from secondary_free_list",
223 HR_FORMAT_PARAMS(res));
224 }
225 return res;
226 }
227
228 // Wait here until we get notified either when (a) there are no
229 // more free regions coming or (b) some regions have been moved onto
230 // the secondary_free_list.
231 SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
232 }
233
234 if (G1ConcRegionFreeingVerbose) {
235 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
236 "could not allocate from secondary_free_list");
237 }
238 return NULL;
239 }
240
241 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
242 assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
243 "the only time we use this to allocate a humongous region is "
244 "when we are allocating a single humongous region");
245
246 HeapRegion* res;
247 if (G1StressConcRegionFreeing) {
248 if (!_secondary_free_list.is_empty()) {
249 if (G1ConcRegionFreeingVerbose) {
250 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
251 "forced to look at the secondary_free_list");
252 }
253 res = new_region_try_secondary_free_list(is_old);
254 if (res != NULL) {
255 return res;
256 }
257 }
258 }
259
260 res = _hrm.allocate_free_region(is_old);
261
262 if (res == NULL) {
263 if (G1ConcRegionFreeingVerbose) {
264 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
265 "res == NULL, trying the secondary_free_list");
266 }
267 res = new_region_try_secondary_free_list(is_old);
268 }
269 if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
270 // Currently, only attempts to allocate GC alloc regions set
271 // do_expand to true. So, we should only reach here during a
272 // safepoint. If this assumption changes we might have to
273 // reconsider the use of _expand_heap_after_alloc_failure.
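// (If the expansion below fails, the flag is cleared so that subsequent
// allocation failures do not keep attempting a hopeless expansion.)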
274 assert(SafepointSynchronize::is_at_safepoint(), "invariant");
275
276 ergo_verbose1(ErgoHeapSizing,
277 "attempt heap expansion",
278 ergo_format_reason("region allocation request failed")
279 ergo_format_byte("allocation request"),
280 word_size * HeapWordSize);
281 if (expand(word_size * HeapWordSize)) {
282 // Given that expand() succeeded in expanding the heap, and we
283 // always expand the heap by an amount aligned to the heap
284 // region size, the free list should in theory not be empty.
285 // In either case allocate_free_region() will check for NULL.
286 res = _hrm.allocate_free_region(is_old);
287 } else {
288 _expand_heap_after_alloc_failure = false;
289 }
290 }
291 return res;
292 }
293
294 HeapWord*
295 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
296 uint num_regions,
297 size_t word_size,
298 AllocationContext_t context) {
299 assert(first != G1_NO_HRM_INDEX, "pre-condition");
300 assert(is_humongous(word_size), "word_size should be humongous");
431 // potentially waits for regions from the secondary free list.
432 wait_while_free_regions_coming();
433 append_secondary_free_list_if_not_empty_with_lock();
434
435 // Policy: First try only empty (i.e. already committed) regions. Maybe we
436 // are lucky enough to find some.
437 first = _hrm.find_contiguous_only_empty(obj_regions);
438 if (first != G1_NO_HRM_INDEX) {
439 _hrm.allocate_free_regions_starting_at(first, obj_regions);
440 }
441 }
442
443 if (first == G1_NO_HRM_INDEX) {
444 // Policy: We could not find enough regions for the humongous object in the
445 // free list. Look through the heap for a mix of free and uncommitted regions.
446 // If we find such a range, try expansion.
447 first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
448 if (first != G1_NO_HRM_INDEX) {
449 // We found something. Make sure these regions are committed, i.e. expand
450 // the heap. Alternatively we could do a defragmentation GC.
451 ergo_verbose1(ErgoHeapSizing,
452 "attempt heap expansion",
453 ergo_format_reason("humongous allocation request failed")
454 ergo_format_byte("allocation request"),
455 word_size * HeapWordSize);
456
457 _hrm.expand_at(first, obj_regions);
458 g1_policy()->record_new_heap_size(num_regions());
459
460 #ifdef ASSERT
461 for (uint i = first; i < first + obj_regions; ++i) {
462 HeapRegion* hr = region_at(i);
463 assert(hr->is_free(), "sanity");
464 assert(hr->is_empty(), "sanity");
465 assert(is_on_master_free_list(hr), "sanity");
466 }
467 #endif
468 _hrm.allocate_free_regions_starting_at(first, obj_regions);
469 } else {
470 // Policy: Potentially trigger a defragmentation GC.
471 }
472 }
473
474 HeapWord* result = NULL;
475 if (first != G1_NO_HRM_INDEX) {
476 result = humongous_obj_allocate_initialize_regions(first, obj_regions,
754 HeapRegion* start_region = _hrm.addr_to_region(start_address);
755 if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
756 start_address = start_region->end();
757 if (start_address > last_address) {
758 increase_used(word_size * HeapWordSize);
759 start_region->set_top(last_address + 1);
760 continue;
761 }
762 start_region->set_top(start_address);
763 curr_range = MemRegion(start_address, last_address + 1);
764 start_region = _hrm.addr_to_region(start_address);
765 }
766
767 // Perform the actual region allocation, exiting if it fails.
768 // Then note how much new space we have allocated.
769 if (!_hrm.allocate_containing_regions(curr_range, &commits)) {
770 return false;
771 }
772 increase_used(word_size * HeapWordSize);
773 if (commits != 0) {
774 ergo_verbose1(ErgoHeapSizing,
775 "attempt heap expansion",
776 ergo_format_reason("allocate archive regions")
777 ergo_format_byte("total size"),
778 HeapRegion::GrainWords * HeapWordSize * commits);
779 }
780
781 // Mark each G1 region touched by the range as archive, add it to the old set,
782 // and set the allocation context and top.
783 HeapRegion* curr_region = _hrm.addr_to_region(start_address);
784 HeapRegion* last_region = _hrm.addr_to_region(last_address);
785 prev_last_region = last_region;
786
787 while (curr_region != NULL) {
788 assert(curr_region->is_empty() && !curr_region->is_pinned(),
789 "Region already in use (index %u)", curr_region->hrm_index());
790 _hr_printer.alloc(curr_region, G1HRPrinter::Archive);
791 curr_region->set_allocation_context(AllocationContext::system());
792 curr_region->set_archive();
793 _old_set.add(curr_region);
794 if (curr_region != last_region) {
795 curr_region->set_top(curr_region->end());
796 curr_region = _hrm.next_region_in_heap(curr_region);
797 } else {
798 curr_region->set_top(last_address + 1);
939 guarantee(curr_region->is_archive(),
940 "Expected archive region at index %u", curr_region->hrm_index());
941 uint curr_index = curr_region->hrm_index();
942 _old_set.remove(curr_region);
943 curr_region->set_free();
944 curr_region->set_top(curr_region->bottom());
945 if (curr_region != last_region) {
946 curr_region = _hrm.next_region_in_heap(curr_region);
947 } else {
948 curr_region = NULL;
949 }
950 _hrm.shrink_at(curr_index, 1);
951 uncommitted_regions++;
952 }
953
954 // Notify mark-sweep that this is no longer an archive range.
955 G1MarkSweep::set_range_archive(ranges[i], false);
956 }
957
958 if (uncommitted_regions != 0) {
959 ergo_verbose1(ErgoHeapSizing,
960 "attempt heap shrinking",
961 ergo_format_reason("uncommitted archive regions")
962 ergo_format_byte("total size"),
963 HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
964 }
965 decrease_used(size_used);
966 }
967
968 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
969 uint* gc_count_before_ret,
970 uint* gclocker_retry_count_ret) {
971 // The structure of this method has a lot of similarities to
972 // attempt_allocation_slow(). The reason these two were not merged
973 // into a single one is that such a method would require several "if
974 // allocation is not humongous do this, otherwise do that"
975 // conditional paths which would obscure its flow. In fact, an early
976 // version of this code did use a unified method which was harder to
977 // follow and, as a result, it had subtle bugs that were hard to
978 // track down. So keeping these two methods separate allows each to
979 // be more readable. It will be good to keep these two in sync as
980 // much as possible.
981
982 assert_heap_not_locked_and_not_at_safepoint();
1180 // We only generate output for non-empty regions.
1181 } else if (hr->is_starts_humongous()) {
1182 _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
1183 } else if (hr->is_continues_humongous()) {
1184 _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
1185 } else if (hr->is_archive()) {
1186 _hr_printer->post_compaction(hr, G1HRPrinter::Archive);
1187 } else if (hr->is_old()) {
1188 _hr_printer->post_compaction(hr, G1HRPrinter::Old);
1189 } else {
1190 ShouldNotReachHere();
1191 }
1192 return false;
1193 }
1194
1195 PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
1196 : _hr_printer(hr_printer) { }
1197 };
1198
1199 void G1CollectedHeap::print_hrm_post_compaction() {
1200 PostCompactionPrinterClosure cl(hr_printer());
1201 heap_region_iterate(&cl);
1202 }
1203
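// Performs a full, stop-the-world collection. It aborts any concurrent marking
// and refinement in progress, releases the mutator and GC alloc regions,
// compacts the heap (the serial mark-sweep-compact path), rebuilds remembered
// sets and strong code root lists, and resizes the heap if necessary. Returns
// false if the collection was skipped because the GC locker is active.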
1204 bool G1CollectedHeap::do_collection(bool explicit_gc,
1205 bool clear_all_soft_refs,
1206 size_t word_size) {
1207 assert_at_safepoint(true /* should_be_vm_thread */);
1208
1209 if (GC_locker::check_active_before_gc()) {
1210 return false;
1211 }
1212
1213 STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
1214 gc_timer->register_gc_start();
1215
1216 SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
1217 GCIdMark gc_id_mark;
1218 gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
1219
1220 SvcGCMarker sgcm(SvcGCMarker::FULL);
1221 ResourceMark rm;
1222
1223 G1Log::update_level();
1224 print_heap_before_gc();
1225 trace_heap_before_gc(gc_tracer);
1226
1227 size_t metadata_prev_used = MetaspaceAux::used_bytes();
1228
1229 verify_region_sets_optional();
1230
1231 const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1232 collector_policy()->should_clear_all_soft_refs();
1233
1234 ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
1235
1236 {
1237 IsGCActiveMark x;
1238
1239 // Timing
1240 assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
1241 TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
1242
1243 {
1244 GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL);
1245 TraceCollectorStats tcs(g1mm()->full_collection_counters());
1246 TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1247
1248 g1_policy()->record_full_collection_start();
1249
1250 // Note: When we have a more flexible GC logging framework that
1251 // allows us to add optional attributes to a GC log record we
1252 // could consider timing and reporting how long we wait in the
1253 // following two methods.
1254 wait_while_free_regions_coming();
1255 // If we start the compaction before the CM threads finish
1256 // scanning the root regions we might trip them over as we'll
1257 // be moving objects / updating references. So let's wait until
1258 // they are done. By telling them to abort, they should complete
1259 // early.
1260 _cm->root_regions()->abort();
1261 _cm->root_regions()->wait_until_scan_finished();
1262 append_secondary_free_list_if_not_empty_with_lock();
1263
1264 gc_prologue(true);
1275 #if defined(COMPILER2) || INCLUDE_JVMCI
1276 DerivedPointerTable::clear();
1277 #endif
1278
1279 // Disable discovery and empty the discovered lists
1280 // for the CM ref processor.
1281 ref_processor_cm()->disable_discovery();
1282 ref_processor_cm()->abandon_partial_discovery();
1283 ref_processor_cm()->verify_no_references_recorded();
1284
1285 // Abandon current iterations of concurrent marking and concurrent
1286 // refinement, if any are in progress. (The root region scan has already
1287 // been aborted and waited for above.)
1288 concurrent_mark()->abort();
1289
1290 // Make sure we'll choose a new allocation region afterwards.
1291 _allocator->release_mutator_alloc_region();
1292 _allocator->abandon_gc_alloc_regions();
1293 g1_rem_set()->cleanupHRRS();
1294
1295 // We should call this after we retire any currently active alloc
1296 // regions so that all the ALLOC / RETIRE events are generated
1297 // before the start GC event.
1298 _hr_printer.start_gc(true /* full */, (size_t) total_collections());
1299
1300 // We may have added regions to the current incremental collection
1301 // set between the last GC or pause and now. We need to clear the
1302 // incremental collection set and then start rebuilding it afresh
1303 // after this full GC.
1304 abandon_collection_set(g1_policy()->inc_cset_head());
1305 g1_policy()->clear_incremental_cset();
1306 g1_policy()->stop_incremental_cset_building();
1307
1308 tear_down_region_sets(false /* free_list_only */);
1309 collector_state()->set_gcs_are_young(true);
1310
1311 // See the comments in g1CollectedHeap.hpp and
1312 // G1CollectedHeap::ref_processing_init() about
1313 // how reference processing currently works in G1.
1314
1315 // Temporarily make discovery by the STW ref processor single threaded (non-MT).
1316 ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
1317
1318 // Temporarily clear the STW ref processor's _is_alive_non_header field.
1319 ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
1347 ClassLoaderDataGraph::purge();
1348 MetaspaceAux::verify_metrics();
1349
1350 // Note: since we've just done a full GC, concurrent
1351 // marking is no longer active. Therefore we need not
1352 // re-enable reference discovery for the CM ref processor.
1353 // That will be done at the start of the next marking cycle.
1354 assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
1355 ref_processor_cm()->verify_no_references_recorded();
1356
1357 reset_gc_time_stamp();
1358 // Since everything potentially moved, we will clear all remembered
1359 // sets, and clear all cards. Later we will rebuild remembered
1360 // sets. We will also reset the GC time stamps of the regions.
1361 clear_rsets_post_compaction();
1362 check_gc_time_stamps();
1363
1364 // Resize the heap if necessary.
1365 resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
1366
1367 if (_hr_printer.is_active()) {
1368 // We should do this after we potentially resize the heap so
1369 // that all the COMMIT / UNCOMMIT events are generated before
1370 // the end GC event.
1371
1372 print_hrm_post_compaction();
1373 _hr_printer.end_gc(true /* full */, (size_t) total_collections());
1374 }
1375
1376 G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
1377 if (hot_card_cache->use_cache()) {
1378 hot_card_cache->reset_card_counts();
1379 hot_card_cache->reset_hot_cache();
1380 }
1381
1382 // Rebuild remembered sets of all regions.
1383 uint n_workers =
1384 AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
1385 workers()->active_workers(),
1386 Threads::number_of_non_daemon_threads());
1387 workers()->set_active_workers(n_workers);
1388
1389 ParRebuildRSTask rebuild_rs_task(this);
1390 workers()->run_task(&rebuild_rs_task);
1391
1392 // Rebuild the strong code root lists for each region
1393 rebuild_strong_code_roots();
1394
1423 // ConcurrentMark::abort() above since VerifyDuringGC verifies the
1424 // objects marked during a full GC against the previous bitmap.
1425 // But we need to clear it before calling check_bitmaps below since
1426 // the full GC has compacted objects and updated TAMS but not updated
1427 // the prev bitmap.
1428 if (G1VerifyBitmaps) {
1429 ((CMBitMap*) concurrent_mark()->prevMarkBitMap())->clearAll();
1430 }
1431 check_bitmaps("Full GC End");
1432
1433 // Start a new incremental collection set for the next pause
1434 assert(g1_policy()->collection_set() == NULL, "must be");
1435 g1_policy()->start_incremental_cset_building();
1436
1437 clear_cset_fast_test();
1438
1439 _allocator->init_mutator_alloc_region();
1440
1441 g1_policy()->record_full_collection_end();
1442
1443 if (G1Log::fine()) {
1444 g1_policy()->print_heap_transition();
1445 }
1446
1447 // We must call G1MonitoringSupport::update_sizes() in the same scoping level
1448 // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
1449 // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
1450 // before any GC notifications are raised.
1451 g1mm()->update_sizes();
1452
1453 gc_epilogue(true);
1454 }
1455
1456 if (G1Log::finer()) {
1457 g1_policy()->print_detailed_heap_transition(true /* full */);
1458 }
1459
1460 print_heap_after_gc();
1461 trace_heap_after_gc(gc_tracer);
1462
1463 post_full_gc_dump(gc_timer);
1464
1465 gc_timer->register_gc_end();
1466 gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1467 }
1468
1469 return true;
1470 }
1471
1472 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1473 // do_collection() will return whether it succeeded in performing
1474 // the GC. Currently, there is no facility on the
1475 // do_full_collection() API to notify the caller that the collection
1476 // did not succeed (e.g., because it was locked out by the GC
1477 // locker). So, right now, we'll ignore the return value.
1478 bool dummy = do_collection(true, /* explicit_gc */
1523
1524 // This assert only makes sense here, before we adjust them
1525 // with respect to the min and max heap size.
1526 assert(minimum_desired_capacity <= maximum_desired_capacity,
1527 "minimum_desired_capacity = " SIZE_FORMAT ", "
1528 "maximum_desired_capacity = " SIZE_FORMAT,
1529 minimum_desired_capacity, maximum_desired_capacity);
1530
1531 // Should not be greater than the heap max size. No need to adjust
1532 // it with respect to the heap min size as it's a lower bound (i.e.,
1533 // we'll try to make the capacity larger than it, not smaller).
1534 minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
1535 // Should not be less than the heap min size. No need to adjust it
1536 // with respect to the heap max size as it's an upper bound (i.e.,
1537 // we'll try to make the capacity smaller than it, not greater).
1538 maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size);
1539
1540 if (capacity_after_gc < minimum_desired_capacity) {
1541 // Don't expand unless it's significant
1542 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
1543 ergo_verbose4(ErgoHeapSizing,
1544 "attempt heap expansion",
1545 ergo_format_reason("capacity lower than "
1546 "min desired capacity after Full GC")
1547 ergo_format_byte("capacity")
1548 ergo_format_byte("occupancy")
1549 ergo_format_byte_perc("min desired capacity"),
1550 capacity_after_gc, used_after_gc,
1551 minimum_desired_capacity, (double) MinHeapFreeRatio);
1552 expand(expand_bytes);
1553
1554 // No expansion, now see if we want to shrink
1555 } else if (capacity_after_gc > maximum_desired_capacity) {
1556 // Capacity too large, compute shrinking size
1557 size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
1558 ergo_verbose4(ErgoHeapSizing,
1559 "attempt heap shrinking",
1560 ergo_format_reason("capacity higher than "
1561 "max desired capacity after Full GC")
1562 ergo_format_byte("capacity")
1563 ergo_format_byte("occupancy")
1564 ergo_format_byte_perc("max desired capacity"),
1565 capacity_after_gc, used_after_gc,
1566 maximum_desired_capacity, (double) MaxHeapFreeRatio);
1567 shrink(shrink_bytes);
1568 }
1569 }
1570
1571 HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
1572 AllocationContext_t context,
1573 bool do_gc,
1574 bool clear_all_soft_refs,
1575 bool expect_null_mutator_alloc_region,
1576 bool* gc_succeeded) {
1577 *gc_succeeded = true;
1578 // Let's attempt the allocation first.
1579 HeapWord* result =
1580 attempt_allocation_at_safepoint(word_size,
1581 context,
1582 expect_null_mutator_alloc_region);
1583 if (result != NULL) {
1584 assert(*gc_succeeded, "sanity");
1585 return result;
1586 }
1653
1654 // What else? We might try synchronous finalization later. If the total
1655 // space available is large enough for the allocation, then a more
1656 // complete compaction phase than we've tried so far might be
1657 // appropriate.
1658 assert(*succeeded, "sanity");
1659 return NULL;
1660 }
1661
1662 // Attempts to expand the heap sufficiently
1663 // to support an allocation of the given "word_size". If
1664 // successful, performs the allocation and returns the address of the
1665 // allocated block; otherwise returns NULL.
1666
1667 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) {
1668 assert_at_safepoint(true /* should_be_vm_thread */);
1669
1670 verify_region_sets_optional();
1671
1672 size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1673 ergo_verbose1(ErgoHeapSizing,
1674 "attempt heap expansion",
1675 ergo_format_reason("allocation request failed")
1676 ergo_format_byte("allocation request"),
1677 word_size * HeapWordSize);
1678 if (expand(expand_bytes)) {
1679 _hrm.verify_optional();
1680 verify_region_sets_optional();
1681 return attempt_allocation_at_safepoint(word_size,
1682 context,
1683 false /* expect_null_mutator_alloc_region */);
1684 }
1685 return NULL;
1686 }
1687
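// Expands the committed heap by at least expand_bytes, rounded up first to the
// page size and then to whole heap regions; for example, a 3 MB request with
// 1 MB regions yields regions_to_expand == 3. Note that the return value is
// true whenever an expansion was attempted (regions_to_expand > 0), even if
// the underlying commit failed (expanded_by == 0).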
1688 bool G1CollectedHeap::expand(size_t expand_bytes, double* expand_time_ms) {
1689 size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1690 aligned_expand_bytes = align_size_up(aligned_expand_bytes,
1691 HeapRegion::GrainBytes);
1692 ergo_verbose2(ErgoHeapSizing,
1693 "expand the heap",
1694 ergo_format_byte("requested expansion amount")
1695 ergo_format_byte("attempted expansion amount"),
1696 expand_bytes, aligned_expand_bytes);
1697
1698 if (is_maximal_no_gc()) {
1699 ergo_verbose0(ErgoHeapSizing,
1700 "did not expand the heap",
1701 ergo_format_reason("heap already fully expanded"));
1702 return false;
1703 }
1704
1705 double expand_heap_start_time_sec = os::elapsedTime();
1706 uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
1707 assert(regions_to_expand > 0, "Must expand by at least one region");
1708
1709 uint expanded_by = _hrm.expand_by(regions_to_expand);
1710 if (expand_time_ms != NULL) {
1711 *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
1712 }
1713
1714 if (expanded_by > 0) {
1715 size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
1716 assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1717 g1_policy()->record_new_heap_size(num_regions());
1718 } else {
1719 ergo_verbose0(ErgoHeapSizing,
1720 "did not expand the heap",
1721 ergo_format_reason("heap expansion operation failed"));
1722 // The expansion of the virtual storage space was unsuccessful.
1723 // Let's see if it was because we ran out of swap.
1724 if (G1ExitOnExpansionFailure &&
1725 _hrm.available() >= regions_to_expand) {
1726 // We had headroom...
1727 vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
1728 }
1729 }
1730 return regions_to_expand > 0;
1731 }
1732
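// Uncommits up to shrink_bytes worth of regions from the heap. The number of
// regions actually removed (possibly zero) is reported via the ergo trace
// below and, if non-zero, fed back to the policy via record_new_heap_size().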
1733 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1734 size_t aligned_shrink_bytes =
1735 ReservedSpace::page_align_size_down(shrink_bytes);
1736 aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
1737 HeapRegion::GrainBytes);
1738 uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
1739
1740 uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
1741 size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
1742
1743 ergo_verbose3(ErgoHeapSizing,
1744 "shrink the heap",
1745 ergo_format_byte("requested shrinking amount")
1746 ergo_format_byte("aligned shrinking amount")
1747 ergo_format_byte("attempted shrinking amount"),
1748 shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
1749 if (num_regions_removed > 0) {
1750 g1_policy()->record_new_heap_size(num_regions());
1751 } else {
1752 ergo_verbose0(ErgoHeapSizing,
1753 "did not shrink the heap",
1754 ergo_format_reason("heap shrinking operation failed"));
1755 }
1756 }
1757
1758 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1759 verify_region_sets_optional();
1760
1761 // We should only reach here at the end of a Full GC which means we
1762 // should not be holding on to any GC alloc regions. The method
1763 // below will make sure of that and do any remaining clean up.
1764 _allocator->abandon_gc_alloc_regions();
1765
1766 // Instead of tearing down / rebuilding the free lists here, we
1767 // could instead use the remove_all_pending() method on free_list to
1768 // remove only the ones that we need to remove.
1769 tear_down_region_sets(true /* free_list_only */);
1770 shrink_helper(shrink_bytes);
1771 rebuild_region_sets(true /* free_list_only */);
1772
1773 _hrm.verify_optional();
1774 verify_region_sets_optional();
1846 // Initialize the G1EvacuationFailureALot counters and flags.
1847 NOT_PRODUCT(reset_evacuation_should_fail();)
1848
1849 guarantee(_task_queues != NULL, "task_queues allocation failure.");
1850 }
1851
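// Creates a reserved space and region-to-space mapper for an auxiliary data
// structure whose size scales with the heap according to translation_factor,
// preferring large pages when the requested size allows it.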
1852 G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description,
1853 size_t size,
1854 size_t translation_factor) {
1855 size_t preferred_page_size = os::page_size_for_region_unaligned(size, 1);
1856 // Allocate a new reserved space, preferring to use large pages.
1857 ReservedSpace rs(size, preferred_page_size);
1858 G1RegionToSpaceMapper* result =
1859 G1RegionToSpaceMapper::create_mapper(rs,
1860 size,
1861 rs.alignment(),
1862 HeapRegion::GrainBytes,
1863 translation_factor,
1864 mtGC);
1865 if (TracePageSizes) {
1866 gclog_or_tty->print_cr("G1 '%s': pg_sz=" SIZE_FORMAT " base=" PTR_FORMAT " size=" SIZE_FORMAT " alignment=" SIZE_FORMAT " reqsize=" SIZE_FORMAT,
1867 description, preferred_page_size, p2i(rs.base()), rs.size(), rs.alignment(), size);
1868 }
1869 return result;
1870 }
1871
1872 jint G1CollectedHeap::initialize() {
1873 CollectedHeap::pre_initialize();
1874 os::enable_vtime();
1875
1876 G1Log::init();
1877
1878 // Necessary to satisfy locking discipline assertions.
1879
1880 MutexLocker x(Heap_lock);
1881
1882 // We have to initialize the printer before committing the heap, as
1883 // it will be used then.
1884 _hr_printer.set_active(G1PrintHeapRegions);
1885
1886 // While there are no constraints in the GC code that HeapWordSize
1887 // be any particular value, there are multiple other areas in the
1888 // system which assume HeapWordSize == wordSize (e.g. oop->object_size in some
1889 // cases incorrectly returns the size in wordSize units rather than
1890 // HeapWordSize).
1891 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1892
1893 size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1894 size_t max_byte_size = collector_policy()->max_heap_byte_size();
1895 size_t heap_alignment = collector_policy()->heap_alignment();
1896
1897 // Ensure that the sizes are properly aligned.
1898 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1899 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1900 Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
1901
1902 _refine_cte_cl = new RefineCardTableEntryClosure();
1903
1904 jint ecode = JNI_OK;
1905 _cg1r = ConcurrentG1Refine::create(this, _refine_cte_cl, &ecode);
2058 G1AllocRegion::setup(this, dummy_region);
2059
2060 _allocator->init_mutator_alloc_region();
2061
2062 // Create the monitoring and management support now that the
2063 // values in the heap have been properly initialized.
2064 _g1mm = new G1MonitoringSupport(this);
2065
2066 G1StringDedup::initialize();
2067
2068 _preserved_objs = NEW_C_HEAP_ARRAY(OopAndMarkOopStack, ParallelGCThreads, mtGC);
2069 for (uint i = 0; i < ParallelGCThreads; i++) {
2070 new (&_preserved_objs[i]) OopAndMarkOopStack();
2071 }
2072
2073 return JNI_OK;
2074 }
2075
2076 void G1CollectedHeap::stop() {
2077 // Stop all concurrent threads. We do this to make sure these threads
2078 // do not continue to execute and access resources (e.g. gclog_or_tty)
2079 // that are destroyed during shutdown.
2080 _cg1r->stop();
2081 _cmThread->stop();
2082 if (G1StringDedup::is_enabled()) {
2083 G1StringDedup::stop();
2084 }
2085 }
2086
2087 size_t G1CollectedHeap::conservative_max_heap_alignment() {
2088 return HeapRegion::max_region_size();
2089 }
2090
2091 void G1CollectedHeap::post_initialize() {
2092 CollectedHeap::post_initialize();
2093 ref_processing_init();
2094 }
2095
2096 void G1CollectedHeap::ref_processing_init() {
2097 // Reference processing in G1 currently works as follows:
2098 //
2175 }
2176
2177 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
2178 hr->reset_gc_time_stamp();
2179 }
2180
2181 #ifndef PRODUCT
2182
2183 class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
2184 private:
2185 unsigned _gc_time_stamp;
2186 bool _failures;
2187
2188 public:
2189 CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
2190 _gc_time_stamp(gc_time_stamp), _failures(false) { }
2191
2192 virtual bool doHeapRegion(HeapRegion* hr) {
2193 unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
2194 if (_gc_time_stamp != region_gc_time_stamp) {
2195 gclog_or_tty->print_cr("Region " HR_FORMAT " has GC time stamp = %d, "
2196 "expected %d", HR_FORMAT_PARAMS(hr),
2197 region_gc_time_stamp, _gc_time_stamp);
2198 _failures = true;
2199 }
2200 return false;
2201 }
2202
2203 bool failures() { return _failures; }
2204 };
2205
2206 void G1CollectedHeap::check_gc_time_stamps() {
2207 CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
2208 heap_region_iterate(&cl);
2209 guarantee(!cl.failures(), "all GC time stamps should have been reset");
2210 }
2211 #endif // PRODUCT
2212
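// Drains the hot card cache, applying cl to each buffered card, on behalf of
// the given worker thread.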
2213 void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
2214 _cg1r->hot_card_cache()->drain(cl, worker_i);
2215 }
2216
2764 private:
2765 G1CollectedHeap* _g1h;
2766 VerifyOption _vo;
2767 bool _failures;
2768 public:
2769 // _vo == UsePrevMarking -> use "prev" marking information,
2770 // _vo == UseNextMarking -> use "next" marking information,
2771 // _vo == UseMarkWord -> use mark word from object header.
2772 VerifyRootsClosure(VerifyOption vo) :
2773 _g1h(G1CollectedHeap::heap()),
2774 _vo(vo),
2775 _failures(false) { }
2776
2777 bool failures() { return _failures; }
2778
2779 template <class T> void do_oop_nv(T* p) {
2780 T heap_oop = oopDesc::load_heap_oop(p);
2781 if (!oopDesc::is_null(heap_oop)) {
2782 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2783 if (_g1h->is_obj_dead_cond(obj, _vo)) {
2784 gclog_or_tty->print_cr("Root location " PTR_FORMAT " "
2785 "points to dead obj " PTR_FORMAT, p2i(p), p2i(obj));
2786 if (_vo == VerifyOption_G1UseMarkWord) {
2787 gclog_or_tty->print_cr(" Mark word: " INTPTR_FORMAT, (intptr_t)obj->mark());
2788 }
2789 obj->print_on(gclog_or_tty);
2790 _failures = true;
2791 }
2792 }
2793 }
2794
2795 void do_oop(oop* p) { do_oop_nv(p); }
2796 void do_oop(narrowOop* p) { do_oop_nv(p); }
2797 };
2798
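// Verifies, for each oop embedded in an nmethod, that the root is live (via
// the wrapped root closure) and that the nmethod is recorded in the strong
// code root list of the region containing the referenced object.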
2799 class G1VerifyCodeRootOopClosure: public OopClosure {
2800 G1CollectedHeap* _g1h;
2801 OopClosure* _root_cl;
2802 nmethod* _nm;
2803 VerifyOption _vo;
2804 bool _failures;
2805
2806 template <class T> void do_oop_work(T* p) {
2807 // First verify that this root is live
2808 _root_cl->do_oop(p);
2809
2814
2815 // Don't check the code roots during marking verification in a full GC
2816 if (_vo == VerifyOption_G1UseMarkWord) {
2817 return;
2818 }
2819
2820 // Now verify that the current nmethod (which contains p) is
2821 // in the code root list of the heap region containing the
2822 // object referenced by p.
2823
2824 T heap_oop = oopDesc::load_heap_oop(p);
2825 if (!oopDesc::is_null(heap_oop)) {
2826 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2827
2828 // Now fetch the region containing the object
2829 HeapRegion* hr = _g1h->heap_region_containing(obj);
2830 HeapRegionRemSet* hrrs = hr->rem_set();
2831 // Verify that the strong code root list for this region
2832 // contains the nmethod
2833 if (!hrrs->strong_code_roots_list_contains(_nm)) {
2834 gclog_or_tty->print_cr("Code root location " PTR_FORMAT " "
2835 "from nmethod " PTR_FORMAT " not in strong "
2836 "code roots for region [" PTR_FORMAT "," PTR_FORMAT ")",
2837 p2i(p), p2i(_nm), p2i(hr->bottom()), p2i(hr->end()));
2838 _failures = true;
2839 }
2840 }
2841 }
2842
2843 public:
2844 G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo):
2845 _g1h(g1h), _root_cl(root_cl), _vo(vo), _nm(NULL), _failures(false) {}
2846
2847 void do_oop(oop* p) { do_oop_work(p); }
2848 void do_oop(narrowOop* p) { do_oop_work(p); }
2849
2850 void set_nmethod(nmethod* nm) { _nm = nm; }
2851 bool failures() { return _failures; }
2852 };
2853
2854 class G1VerifyCodeRootBlobClosure: public CodeBlobClosure {
2995 }
2996
2997 bool doHeapRegion(HeapRegion* r) {
2998 // For archive regions, verify there are no heap pointers to
2999 // non-pinned regions. For all others, verify liveness info.
3000 if (r->is_archive()) {
3001 VerifyArchiveRegionClosure verify_oop_pointers(r);
3002 r->object_iterate(&verify_oop_pointers);
3003 return true;
3004 }
3005 if (!r->is_continues_humongous()) {
3006 bool failures = false;
3007 r->verify(_vo, &failures);
3008 if (failures) {
3009 _failures = true;
3010 } else if (!r->is_starts_humongous()) {
3011 VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
3012 r->object_iterate(&not_dead_yet_cl);
3013 if (_vo != VerifyOption_G1UseNextMarking) {
3014 if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
3015 gclog_or_tty->print_cr("[" PTR_FORMAT "," PTR_FORMAT "] "
3016 "max_live_bytes " SIZE_FORMAT " "
3017 "< calculated " SIZE_FORMAT,
3018 p2i(r->bottom()), p2i(r->end()),
3019 r->max_live_bytes(),
3020 not_dead_yet_cl.live_bytes());
3021 _failures = true;
3022 }
3023 } else {
3024 // When vo == UseNextMarking we cannot currently do a sanity
3025 // check on the live bytes as the calculation has not been
3026 // finalized yet.
3027 }
3028 }
3029 }
3030 return false; // stop the region iteration if we hit a failure
3031 }
3032 };
3033
3034 // This is the task used for parallel verification of the heap regions
3035
3036 class G1ParVerifyTask: public AbstractGangTask {
3037 private:
3038 G1CollectedHeap* _g1h;
3039 VerifyOption _vo;
3040 bool _failures;
3048 AbstractGangTask("Parallel verify task"),
3049 _g1h(g1h),
3050 _vo(vo),
3051 _failures(false),
3052 _hrclaimer(g1h->workers()->active_workers()) {}
3053
3054 bool failures() {
3055 return _failures;
3056 }
3057
3058 void work(uint worker_id) {
3059 HandleMark hm;
3060 VerifyRegionClosure blk(true, _vo);
3061 _g1h->heap_region_par_iterate(&blk, worker_id, &_hrclaimer);
3062 if (blk.failures()) {
3063 _failures = true;
3064 }
3065 }
3066 };
3067
3068 void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
3069 if (SafepointSynchronize::is_at_safepoint()) {
3070 assert(Thread::current()->is_VM_thread(),
3071 "Expected to be executed serially by the VM thread at this point");
3072
3073 if (!silent) { gclog_or_tty->print("Roots "); }
3074 VerifyRootsClosure rootsCl(vo);
3075 VerifyKlassClosure klassCl(this, &rootsCl);
3076 CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);
3077
3078 // We apply the relevant closures to all the oops in the
3079 // system dictionary, class loader data graph, the string table
3080 // and the nmethods in the code cache.
3081 G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
3082 G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
3083
3084 {
3085 G1RootProcessor root_processor(this, 1);
3086 root_processor.process_all_roots(&rootsCl,
3087 &cldCl,
3088 &blobsCl);
3089 }
3090
3091 bool failures = rootsCl.failures() || codeRootsCl.failures();
3092
3093 if (vo != VerifyOption_G1UseMarkWord) {
3094 // If we're verifying during a full GC then the region sets
3095 // will have been torn down at the start of the GC. Therefore
3096 // verifying the region sets will fail. So we only verify
3097 // the region sets when not in a full GC.
3098 if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
3099 verify_region_sets();
3100 }
3101
3102 if (!silent) { gclog_or_tty->print("HeapRegions "); }
3103 if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
3104
3105 G1ParVerifyTask task(this, vo);
3106 workers()->run_task(&task);
3107 if (task.failures()) {
3108 failures = true;
3109 }
3110
3111 } else {
3112 VerifyRegionClosure blk(false, vo);
3113 heap_region_iterate(&blk);
3114 if (blk.failures()) {
3115 failures = true;
3116 }
3117 }
3118
3119 if (G1StringDedup::is_enabled()) {
3120 if (!silent) gclog_or_tty->print("StrDedup ");
3121 G1StringDedup::verify();
3122 }
3123
3124 if (failures) {
3125 gclog_or_tty->print_cr("Heap:");
3126 // It helps to have the per-region information in the output to
3127 // help us track down what went wrong. This is why we call
3128 // print_extended_on() instead of print_on().
3129 print_extended_on(gclog_or_tty);
3130 gclog_or_tty->cr();
3131 gclog_or_tty->flush();
3132 }
3133 guarantee(!failures, "there should not have been any failures");
3134 } else {
3135 if (!silent) {
3136 gclog_or_tty->print("(SKIPPING Roots, HeapRegionSets, HeapRegions, RemSet");
3137 if (G1StringDedup::is_enabled()) {
3138 gclog_or_tty->print(", StrDedup");
3139 }
3140 gclog_or_tty->print(") ");
3141 }
3142 }
3143 }
3144
3145 void G1CollectedHeap::verify(bool silent) {
3146 verify(silent, VerifyOption_G1UsePrevMarking);
3147 }
3148
3149 double G1CollectedHeap::verify(bool guard, const char* msg) {
3150 double verify_time_ms = 0.0;
3151
3152 if (guard && total_collections() >= VerifyGCStartAt) {
3153 double verify_start = os::elapsedTime();
3154 HandleMark hm; // Discard invalid handles created during verification
3155 prepare_for_verify();
3156 Universe::verify(VerifyOption_G1UsePrevMarking, msg);
3157 verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
3158 }
3159
3160 return verify_time_ms;
3161 }
3162
3163 void G1CollectedHeap::verify_before_gc() {
3164 double verify_time_ms = verify(VerifyBeforeGC, " VerifyBeforeGC:");
3165 g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
3166 }
3167
3168 void G1CollectedHeap::verify_after_gc() {
3169 double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:");
3170 g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
3171 }
3172
3173 class PrintRegionClosure: public HeapRegionClosure {
3174 outputStream* _st;
3175 public:
3176 PrintRegionClosure(outputStream* st) : _st(st) {}
3177 bool doHeapRegion(HeapRegion* r) {
3178 r->print_on(_st);
3179 return false;
3180 }
3181 };
3182
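// Maps the verification option to the appropriate liveness predicate: the
// "prev" marking information, the "next" marking information, or the mark
// word (used when verifying during a full GC, where objects in archive
// regions are always considered live).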
3183 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
3184 const HeapRegion* hr,
3185 const VerifyOption vo) const {
3186 switch (vo) {
3187 case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
3188 case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
3189 case VerifyOption_G1UseMarkWord: return !obj->is_gc_marked() && !hr->is_archive();
3259 G1StringDedup::print_worker_threads_on(st);
3260 }
3261 }
3262
3263 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
3264 workers()->threads_do(tc);
3265 tc->do_thread(_cmThread);
3266 _cg1r->threads_do(tc);
3267 if (G1StringDedup::is_enabled()) {
3268 G1StringDedup::threads_do(tc);
3269 }
3270 }
3271
3272 void G1CollectedHeap::print_tracing_info() const {
3273 // We'll overload this to mean "trace GC pause statistics."
3274 if (TraceYoungGenTime || TraceOldGenTime) {
3275 // The "G1CollectorPolicy" is keeping track of these stats, so delegate
3276 // to that.
3277 g1_policy()->print_tracing_info();
3278 }
3279 if (G1SummarizeRSetStats) {
3280 g1_rem_set()->print_summary_info();
3281 }
3282 if (G1SummarizeConcMark) {
3283 concurrent_mark()->print_summary_info();
3284 }
3285 g1_policy()->print_yg_surv_rate_info();
3286 }
3287
3288 #ifndef PRODUCT
3289 // Helpful for debugging RSet issues.
3290
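// Debug-only helper that prints the remembered set of every region it visits
// and, on destruction, the total number of occupied remembered set entries.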
3291 class PrintRSetsClosure : public HeapRegionClosure {
3292 private:
3293 const char* _msg;
3294 size_t _occupied_sum;
3295
3296 public:
3297 bool doHeapRegion(HeapRegion* r) {
3298 HeapRegionRemSet* hrrs = r->rem_set();
3299 size_t occupied = hrrs->occupied();
3300 _occupied_sum += occupied;
3301
3302 gclog_or_tty->print_cr("Printing RSet for region " HR_FORMAT,
3303 HR_FORMAT_PARAMS(r));
3304 if (occupied == 0) {
3305 gclog_or_tty->print_cr(" RSet is empty");
3306 } else {
3307 hrrs->print();
3308 }
3309 gclog_or_tty->print_cr("----------");
3310 return false;
3311 }
3312
3313 PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
3314 gclog_or_tty->cr();
3315 gclog_or_tty->print_cr("========================================");
3316 gclog_or_tty->print_cr("%s", msg);
3317 gclog_or_tty->cr();
3318 }
3319
3320 ~PrintRSetsClosure() {
3321 gclog_or_tty->print_cr("Occupied Sum: " SIZE_FORMAT, _occupied_sum);
3322 gclog_or_tty->print_cr("========================================");
3323 gclog_or_tty->cr();
3324 }
3325 };
3326
3327 void G1CollectedHeap::print_cset_rsets() {
3328 PrintRSetsClosure cl("Printing CSet RSets");
3329 collection_set_iterate(&cl);
3330 }
3331
3332 void G1CollectedHeap::print_all_rsets() {
3333 PrintRSetsClosure cl("Printing All RSets");
3334 heap_region_iterate(&cl);
3335 }
3336 #endif // PRODUCT
3337
3338 G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
3339 YoungList* young_list = heap()->young_list();
3340
3341 size_t eden_used_bytes = young_list->eden_used_bytes();
3342 size_t survivor_used_bytes = young_list->survivor_used_bytes();
3343
3361
3362 const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
3363 gc_tracer->report_metaspace_summary(when, metaspace_summary);
3364 }
3365
3366
3367 G1CollectedHeap* G1CollectedHeap::heap() {
3368 CollectedHeap* heap = Universe::heap();
3369 assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
3370 assert(heap->kind() == CollectedHeap::G1CollectedHeap, "Not a G1CollectedHeap");
3371 return (G1CollectedHeap*)heap;
3372 }
3373
3374 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
3375 // always_do_update_barrier = false;
3376 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
3377 // Fill TLAB's and such
3378 accumulate_statistics_all_tlabs();
3379 ensure_parsability(true);
3380
3381 if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) &&
3382 (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
3383 g1_rem_set()->print_periodic_summary_info("Before GC RS summary");
3384 }
3385 }
3386
3387 void G1CollectedHeap::gc_epilogue(bool full) {
3388
3389 if (G1SummarizeRSetStats &&
3390 (G1SummarizeRSetStatsPeriod > 0) &&
3391 // we are at the end of the GC. Total collections has already been increased.
3392 ((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) {
3393 g1_rem_set()->print_periodic_summary_info("After GC RS summary");
3394 }
3395
3396 // FIXME: what is this about?
3397 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
3398 // is set.
3399 #if defined(COMPILER2) || INCLUDE_JVMCI
3400 assert(DerivedPointerTable::is_empty(), "derived pointer present");
3401 #endif
3402 // always_do_update_barrier = true;
3403
3404 resize_all_tlabs();
3405 allocation_context_stats().update(full);
3406
3407 // We have just completed a GC. Update the soft reference
3408 // policy with the new heap occupancy
3409 Universe::update_heap_info_at_gc();
3410 }
3411
3412 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
3413 uint gc_count_before,
3414 bool* succeeded,
3620 guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification");
3621
3622 // Here's a good place to add any other checks we'd like to
3623 // perform on CSet regions.
3624 return false;
3625 }
3626 };
3627 #endif // ASSERT
3628
3629 uint G1CollectedHeap::num_task_queues() const {
3630 return _task_queues->size();
3631 }
3632
3633 #if TASKQUEUE_STATS
3634 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
3635 st->print_raw_cr("GC Task Stats");
3636 st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
3637 st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
3638 }
3639
3640 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const {
3641 print_taskqueue_stats_hdr(st);
3642
3643 TaskQueueStats totals;
3644 const uint n = num_task_queues();
3645 for (uint i = 0; i < n; ++i) {
3646 st->print("%3u ", i); task_queue(i)->stats.print(st); st->cr();
3647 totals += task_queue(i)->stats;
3648 }
3649 st->print_raw("tot "); totals.print(st); st->cr();
3650
3651 DEBUG_ONLY(totals.verify());
3652 }
3653
3654 void G1CollectedHeap::reset_taskqueue_stats() {
3655 const uint n = num_task_queues();
3656 for (uint i = 0; i < n; ++i) {
3657 task_queue(i)->stats.reset();
3658 }
3659 }
3660 #endif // TASKQUEUE_STATS
3661
3662 void G1CollectedHeap::log_gc_header() {
3663 if (!G1Log::fine()) {
3664 return;
3665 }
3666
3667 gclog_or_tty->gclog_stamp();
3668
3669 GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
3670 .append(collector_state()->gcs_are_young() ? "(young)" : "(mixed)")
3671 .append(collector_state()->during_initial_mark_pause() ? " (initial-mark)" : "");
3672
3673 gclog_or_tty->print("[%s", (const char*)gc_cause_str);
3674 }
3675
3676 void G1CollectedHeap::log_gc_footer(double pause_time_sec) {
3677 if (!G1Log::fine()) {
3678 return;
3679 }
3680
3681 if (G1Log::finer()) {
3682 if (evacuation_failed()) {
3683 gclog_or_tty->print(" (to-space exhausted)");
3684 }
3685 gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
3686 g1_policy()->print_phases(pause_time_sec);
3687 g1_policy()->print_detailed_heap_transition();
3688 } else {
3689 if (evacuation_failed()) {
3690 gclog_or_tty->print("--");
3691 }
3692 g1_policy()->print_heap_transition();
3693 gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
3694 }
3695 gclog_or_tty->flush();
3696 }
3697
3698 void G1CollectedHeap::wait_for_root_region_scanning() {
3699 double scan_wait_start = os::elapsedTime();
3700 // We have to wait until the CM threads finish scanning the
3701 // root regions as it's the only way to ensure that all the
3702 // objects on them have been correctly scanned before we start
3703 // moving them during the GC.
3704 bool waited = _cm->root_regions()->wait_until_scan_finished();
3705 double wait_time_ms = 0.0;
3706 if (waited) {
3707 double scan_wait_end = os::elapsedTime();
3708 wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
3709 }
3710 g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
3711 }
3712
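// Performs an evacuation pause (young or mixed) at a safepoint, sized against
// the given pause time target. Returns false if the pause was skipped because
// the GC locker is active.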
3713 bool
3714 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3715 assert_at_safepoint(true /* should_be_vm_thread */);
3716 guarantee(!is_gc_active(), "collection is not reentrant");
3717
3718 if (GC_locker::check_active_before_gc()) {
3719 return false;
3720 }
3721
3722 _gc_timer_stw->register_gc_start();
3723
3724 GCIdMark gc_id_mark;
3725 _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3726
3727 SvcGCMarker sgcm(SvcGCMarker::MINOR);
3728 ResourceMark rm;
3729
3730 wait_for_root_region_scanning();
3731
3732 G1Log::update_level();
3733 print_heap_before_gc();
3734 trace_heap_before_gc(_gc_tracer_stw);
3735
3736 verify_region_sets_optional();
3737 verify_dirty_young_regions();
3738
3739 // This call will decide whether this pause is an initial-mark
3740 // pause. If it is, during_initial_mark_pause() will return true
3741 // for the duration of this pause.
3742 g1_policy()->decide_on_conc_mark_initiation();
3743
3744 // We do not allow initial-mark to be piggy-backed on a mixed GC.
3745 assert(!collector_state()->during_initial_mark_pause() ||
3746 collector_state()->gcs_are_young(), "sanity");
3747
3748 // We also do not allow mixed GCs during marking.
3749 assert(!collector_state()->mark_in_progress() || collector_state()->gcs_are_young(), "sanity");
3750
3751 // Record whether this pause is an initial mark. By the time the current
3752 // thread has completed its logging output and it is safe to signal
3753 // the CM thread, the flag's value in the policy will have been reset.
3754 bool should_start_conc_mark = collector_state()->during_initial_mark_pause();
3755
3756 // Inner scope for scope based logging, timers, and stats collection
3757 {
3758 EvacuationInfo evacuation_info;
3759
3760 if (collector_state()->during_initial_mark_pause()) {
3761 // We are about to start a marking cycle, so we increment the
3762 // full collection counter.
3763 increment_old_marking_cycles_started();
3764 register_concurrent_cycle_start(_gc_timer_stw->gc_start());
3765 }
3766
3767 _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
3768
3769 TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
3770
3771 uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
3772 workers()->active_workers(),
3773 Threads::number_of_non_daemon_threads());
3774 workers()->set_active_workers(active_workers);
3775
3776 double pause_start_sec = os::elapsedTime();
3777 g1_policy()->note_gc_start(active_workers);
3778 log_gc_header();
3779
3780 TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3781 TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3782
3783 // If the secondary_free_list is not empty, append it to the
3784 // free_list. No need to wait for the cleanup operation to finish;
3785 // the region allocation code will check the secondary_free_list
3786 // and wait if necessary. If the G1StressConcRegionFreeing flag is
3787 // set, skip this step so that the region allocation code has to
3788 // get entries from the secondary_free_list.
3789 if (!G1StressConcRegionFreeing) {
3790 append_secondary_free_list_if_not_empty_with_lock();
3791 }
3792
3793 assert(check_young_list_well_formed(), "young list should be well formed");
3794
3795 // Don't dynamically change the number of GC threads this early. A value of
3796 // 0 is used to indicate serial work. When parallel work is done,
3797 // it will be set.
3798
3812 #endif
3813
3814 // Please see comment in g1CollectedHeap.hpp and
3815 // G1CollectedHeap::ref_processing_init() to see how
3816 // reference processing currently works in G1.
3817
3818 // Enable discovery in the STW reference processor
3819 ref_processor_stw()->enable_discovery();
3820
3821 {
3822 // We want to temporarily turn off discovery by the
3823 // CM ref processor, if necessary, and turn it back on
3824 // again later if we do. Using a scoped
3825 // NoRefDiscovery object will do this.
3826 NoRefDiscovery no_cm_discovery(ref_processor_cm());
3827
3828 // Forget the current alloc region (we might even choose it to be part
3829 // of the collection set!).
3830 _allocator->release_mutator_alloc_region();
3831
3832 // We should call this after we retire the mutator alloc
3833 // region(s) so that all the ALLOC / RETIRE events are generated
3834 // before the start GC event.
3835 _hr_printer.start_gc(false /* full */, (size_t) total_collections());
3836
3837 // This timing is only used by the ergonomics to handle our pause target.
3838 // It is unclear why this should not include the full pause. We will
3839 // investigate this in CR 7178365.
3840 //
3841 // Preserving the old comment here if that helps the investigation:
3842 //
3843 // The elapsed time induced by the start time below deliberately elides
3844 // the possible verification above.
3845 double sample_start_time_sec = os::elapsedTime();
3846
3847 g1_policy()->record_collection_pause_start(sample_start_time_sec);
3848
3849 if (collector_state()->during_initial_mark_pause()) {
3850 concurrent_mark()->checkpointRootsInitialPre();
3851 }
3852
3853 double time_remaining_ms = g1_policy()->finalize_young_cset_part(target_pause_time_ms);
3854 g1_policy()->finalize_old_cset_part(time_remaining_ms);
3855
3856 evacuation_info.set_collectionset_regions(g1_policy()->cset_region_length());
3940
3941 if (collector_state()->during_initial_mark_pause()) {
3942 // We have to do this before we notify the CM threads that
3943 // they can start working to make sure that all the
3944 // appropriate initialization is done on the CM object.
3945 concurrent_mark()->checkpointRootsInitialPost();
3946 collector_state()->set_mark_in_progress(true);
3947 // Note that we don't actually trigger the CM thread at
3948 // this point. We do that later when we're sure that
3949 // the current thread has completed its logging output.
3950 }
3951
3952 allocate_dummy_regions();
3953
3954 _allocator->init_mutator_alloc_region();
3955
3956 {
3957 size_t expand_bytes = g1_policy()->expansion_amount();
3958 if (expand_bytes > 0) {
3959 size_t bytes_before = capacity();
3960 // No need for an ergo verbose message here,
3961 // expansion_amount() does this when it returns a value > 0.
3962 double expand_ms;
3963 if (!expand(expand_bytes, &expand_ms)) {
3964 // We failed to expand the heap. Cannot do anything about it.
3965 }
3966 g1_policy()->phase_times()->record_expand_heap_time(expand_ms);
3967 }
3968 }
3969
3970       // We redo the verification but now wrt the new CSet which
3971       // has just been initialized after the previous CSet was freed.
3972 _cm->verify_no_cset_oops();
3973 _cm->note_end_of_gc();
3974
3975 // This timing is only used by the ergonomics to handle our pause target.
3976 // It is unclear why this should not include the full pause. We will
3977 // investigate this in CR 7178365.
3978 double sample_end_time_sec = os::elapsedTime();
3979 double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
3980 size_t total_cards_scanned = per_thread_states.total_cards_scanned();
4000 // stamp here we invalidate all the GC time stamps on all the
4001 // regions and saved_mark_word() will simply return top() for
4002 // all the regions. This is a nicer way of ensuring this rather
4003 // than iterating over the regions and fixing them. In fact, the
4004 // GC time stamp increment here also ensures that
4005 // saved_mark_word() will return top() between pauses, i.e.,
4006 // during concurrent refinement. So we don't need the
4007       // is_gc_active() check to decide which top to use when
4008 // scanning cards (see CR 7039627).
4009 increment_gc_time_stamp();
4010
4011 verify_after_gc();
4012 check_bitmaps("GC End");
4013
4014 assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
4015 ref_processor_stw()->verify_no_references_recorded();
4016
4017 // CM reference discovery will be re-enabled if necessary.
4018 }
4019
4020 // We should do this after we potentially expand the heap so
4021 // that all the COMMIT events are generated before the end GC
4022 // event, and after we retire the GC alloc regions so that all
4023 // RETIRE events are generated before the end GC event.
4024 _hr_printer.end_gc(false /* full */, (size_t) total_collections());
4025
4026 #ifdef TRACESPINNING
4027 ParallelTaskTerminator::print_termination_counts();
4028 #endif
4029
4030 gc_epilogue(false);
4031 }
4032
4033 // Print the remainder of the GC log output.
4034 log_gc_footer(os::elapsedTime() - pause_start_sec);
4035
4036     // It is not yet safe to tell the concurrent mark to
4037 // start as we have some optional output below. We don't want the
4038 // output from the concurrent mark thread interfering with this
4039 // logging output either.
4040
4041 _hrm.verify_optional();
4042 verify_region_sets_optional();
4043
4044 TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) print_taskqueue_stats());
4045 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
4046
4047 print_heap_after_gc();
4048 trace_heap_after_gc(_gc_tracer_stw);
4049
4050 // We must call G1MonitoringSupport::update_sizes() in the same scoping level
4051 // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
4052 // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
4053 // before any GC notifications are raised.
4054 g1mm()->update_sizes();
4055
4056 _gc_tracer_stw->report_evacuation_info(&evacuation_info);
4057 _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
4058 _gc_timer_stw->register_gc_end();
4059 _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
4060 }
4061 // It should now be safe to tell the concurrent mark thread to start
4062 // without its logging output interfering with the logging output
4063 // that came from the pause.
4064
4215
4216 double strong_roots_sec = os::elapsedTime() - start_strong_roots_sec;
4217
4218 double term_sec = 0.0;
4219 size_t evac_term_attempts = 0;
4220 {
4221 double start = os::elapsedTime();
4222 G1ParEvacuateFollowersClosure evac(_g1h, pss, _queues, &_terminator);
4223 evac.do_void();
4224
4225 evac_term_attempts = evac.term_attempts();
4226 term_sec = evac.term_time();
4227 double elapsed_sec = os::elapsedTime() - start;
4228 _g1h->g1_policy()->phase_times()->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
4229 _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
4230 _g1h->g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, evac_term_attempts);
4231 }
4232
4233 assert(pss->queue_is_empty(), "should be empty");
4234
4235 if (PrintTerminationStats) {
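      // ParGCRareEvent_lock serializes this block so that stats lines printed
      // by different GC workers do not interleave in the shared log stream.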
4236 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
4237 size_t lab_waste;
4238 size_t lab_undo_waste;
4239 pss->waste(lab_waste, lab_undo_waste);
4240 _g1h->print_termination_stats(gclog_or_tty,
4241 worker_id,
4242 (os::elapsedTime() - start_sec) * 1000.0, /* elapsed time */
4243 strong_roots_sec * 1000.0, /* strong roots time */
4244 term_sec * 1000.0, /* evac term time */
4245 evac_term_attempts, /* evac term attempts */
4246 lab_waste, /* alloc buffer waste */
4247 lab_undo_waste /* undo waste */
4248 );
4249 }
4250
4251 // Close the inner scope so that the ResourceMark and HandleMark
4252 // destructors are executed here and are included as part of the
4253 // "GC Worker Time".
4254 }
4255 _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime());
4256 }
4257 };
4258
4259 void G1CollectedHeap::print_termination_stats_hdr(outputStream* const st) {
4260 st->print_raw_cr("GC Termination Stats");
4261 st->print_raw_cr(" elapsed --strong roots-- -------termination------- ------waste (KiB)------");
4262 st->print_raw_cr("thr ms ms % ms % attempts total alloc undo");
4263 st->print_raw_cr("--- --------- --------- ------ --------- ------ -------- ------- ------- -------");
4264 }
4265
4266 void G1CollectedHeap::print_termination_stats(outputStream* const st,
4267 uint worker_id,
4268 double elapsed_ms,
4269 double strong_roots_ms,
4270 double term_ms,
4271 size_t term_attempts,
4272 size_t alloc_buffer_waste,
4273 size_t undo_waste) const {
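  // Times are printed in ms together with their share of the worker's elapsed
  // time; waste is tracked in HeapWords and converted to KiB here to match the
  // "waste (KiB)" column group emitted by print_termination_stats_hdr().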
4274 st->print_cr("%3d %9.2f %9.2f %6.2f "
4275 "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
4276 SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
4277 worker_id, elapsed_ms, strong_roots_ms, strong_roots_ms * 100 / elapsed_ms,
4278 term_ms, term_ms * 100 / elapsed_ms, term_attempts,
4279 (alloc_buffer_waste + undo_waste) * HeapWordSize / K,
4280 alloc_buffer_waste * HeapWordSize / K,
4281 undo_waste * HeapWordSize / K);
4282 }
4283
4284 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
4285 private:
4286 BoolObjectClosure* _is_alive;
4287 int _initial_string_table_size;
4288 int _initial_symbol_table_size;
4289
4290 bool _process_strings;
4291 int _strings_processed;
4292 int _strings_removed;
4293
4294 bool _process_symbols;
4303 _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
4304
4305 _initial_string_table_size = StringTable::the_table()->table_size();
4306 _initial_symbol_table_size = SymbolTable::the_table()->table_size();
4307 if (process_strings) {
4308 StringTable::clear_parallel_claimed_index();
4309 }
4310 if (process_symbols) {
4311 SymbolTable::clear_parallel_claimed_index();
4312 }
4313 }
4314
4315 ~G1StringSymbolTableUnlinkTask() {
4316 guarantee(!_process_strings || StringTable::parallel_claimed_index() >= _initial_string_table_size,
4317 "claim value %d after unlink less than initial string table size %d",
4318 StringTable::parallel_claimed_index(), _initial_string_table_size);
4319 guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
4320 "claim value %d after unlink less than initial symbol table size %d",
4321 SymbolTable::parallel_claimed_index(), _initial_symbol_table_size);
4322
4323 if (G1TraceStringSymbolTableScrubbing) {
4324 gclog_or_tty->print_cr("Cleaned string and symbol table, "
4325 "strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, "
4326 "symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",
4327 strings_processed(), strings_removed(),
4328 symbols_processed(), symbols_removed());
4329 }
4330 }
4331
4332 void work(uint worker_id) {
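    // Each worker claims chunks of the tables through the parallel claimed
    // index, unlinks dead entries, and then folds its local counts into the
    // shared totals using atomic adds.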
4333 int strings_processed = 0;
4334 int strings_removed = 0;
4335 int symbols_processed = 0;
4336 int symbols_removed = 0;
4337 if (_process_strings) {
4338 StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
4339 Atomic::add(strings_processed, &_strings_processed);
4340 Atomic::add(strings_removed, &_strings_removed);
4341 }
4342 if (_process_symbols) {
4343 SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
4344 Atomic::add(symbols_processed, &_symbols_processed);
4345 Atomic::add(symbols_removed, &_symbols_removed);
4346 }
4347 }
4348
4349 size_t strings_processed() const { return (size_t)_strings_processed; }
4350 size_t strings_removed() const { return (size_t)_strings_removed; }
5137
5138 void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
5139 g1_rem_set()->prepare_for_oops_into_collection_set_do();
5140
5141 // Should G1EvacuationFailureALot be in effect for this GC?
5142 NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
5143
5144 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5145 double start_par_time_sec = os::elapsedTime();
5146 double end_par_time_sec;
5147
5148 {
5149 const uint n_workers = workers()->active_workers();
5150 G1RootProcessor root_processor(this, n_workers);
5151 G1ParTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, n_workers);
5152 // InitialMark needs claim bits to keep track of the marked-through CLDs.
5153 if (collector_state()->during_initial_mark_pause()) {
5154 ClassLoaderDataGraph::clear_claimed_marks();
5155 }
5156
5157 // The individual threads will set their evac-failure closures.
5158 if (PrintTerminationStats) {
5159 print_termination_stats_hdr(gclog_or_tty);
5160 }
5161
5162 workers()->run_task(&g1_par_task);
5163 end_par_time_sec = os::elapsedTime();
5164
5165 // Closing the inner scope will execute the destructor
5166 // for the G1RootProcessor object. We record the current
5167 // elapsed time before closing the scope so that time
5168 // taken for the destructor is NOT included in the
5169 // reported parallel time.
5170 }
5171
5172 G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
5173
5174 double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
5175 phase_times->record_par_time(par_time_ms);
5176
5177 double code_root_fixup_time_ms =
5178 (os::elapsedTime() - end_par_time_sec) * 1000.0;
5179 phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
5180
5368 }
5369 }
5370
5371 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
5372 G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
5373 for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
5374 verify_dirty_region(hr);
5375 }
5376 }
5377
5378 void G1CollectedHeap::verify_dirty_young_regions() {
5379 verify_dirty_young_list(_young_list->first_region());
5380 }
5381
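// Verifies that the given bitmap has no bit set in the range [tams, end).
// Objects above the top-at-mark-start (TAMS) were allocated during marking and
// are treated as implicitly live, so they must never be explicitly marked.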
5382 bool G1CollectedHeap::verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
5383 HeapWord* tams, HeapWord* end) {
5384 guarantee(tams <= end,
5385 "tams: " PTR_FORMAT " end: " PTR_FORMAT, p2i(tams), p2i(end));
5386 HeapWord* result = bitmap->getNextMarkedWordAddress(tams, end);
5387 if (result < end) {
5388 gclog_or_tty->cr();
5389 gclog_or_tty->print_cr("## wrong marked address on %s bitmap: " PTR_FORMAT,
5390 bitmap_name, p2i(result));
5391 gclog_or_tty->print_cr("## %s tams: " PTR_FORMAT " end: " PTR_FORMAT,
5392 bitmap_name, p2i(tams), p2i(end));
5393 return false;
5394 }
5395 return true;
5396 }
5397
5398 bool G1CollectedHeap::verify_bitmaps(const char* caller, HeapRegion* hr) {
5399 CMBitMapRO* prev_bitmap = concurrent_mark()->prevMarkBitMap();
5400 CMBitMapRO* next_bitmap = (CMBitMapRO*) concurrent_mark()->nextMarkBitMap();
5401
5402 HeapWord* bottom = hr->bottom();
5403 HeapWord* ptams = hr->prev_top_at_mark_start();
5404 HeapWord* ntams = hr->next_top_at_mark_start();
5405 HeapWord* end = hr->end();
5406
5407 bool res_p = verify_no_bits_over_tams("prev", prev_bitmap, ptams, end);
5408
5409 bool res_n = true;
5410 // We reset mark_in_progress() before we reset _cmThread->in_progress() and in this window
5411 // we do the clearing of the next bitmap concurrently. Thus, we can not verify the bitmap
5412 // if we happen to be in that state.
5413 if (collector_state()->mark_in_progress() || !_cmThread->in_progress()) {
5414 res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
5415 }
5416 if (!res_p || !res_n) {
5417 gclog_or_tty->print_cr("#### Bitmap verification failed for " HR_FORMAT,
5418 HR_FORMAT_PARAMS(hr));
5419 gclog_or_tty->print_cr("#### Caller: %s", caller);
5420 return false;
5421 }
5422 return true;
5423 }
5424
5425 void G1CollectedHeap::check_bitmaps(const char* caller, HeapRegion* hr) {
5426 if (!G1VerifyBitmaps) return;
5427
5428 guarantee(verify_bitmaps(caller, hr), "bitmap verification");
5429 }
5430
5431 class G1VerifyBitmapClosure : public HeapRegionClosure {
5432 private:
5433 const char* _caller;
5434 G1CollectedHeap* _g1h;
5435 bool _failures;
5436
5437 public:
5438 G1VerifyBitmapClosure(const char* caller, G1CollectedHeap* g1h) :
5439 _caller(caller), _g1h(g1h), _failures(false) { }
5451
5452 void G1CollectedHeap::check_bitmaps(const char* caller) {
5453 if (!G1VerifyBitmaps) return;
5454
5455 G1VerifyBitmapClosure cl(caller, this);
5456 heap_region_iterate(&cl);
5457 guarantee(!cl.failures(), "bitmap verification");
5458 }
5459
5460 class G1CheckCSetFastTableClosure : public HeapRegionClosure {
5461 private:
5462 bool _failures;
5463 public:
5464 G1CheckCSetFastTableClosure() : HeapRegionClosure(), _failures(false) { }
5465
5466 virtual bool doHeapRegion(HeapRegion* hr) {
5467 uint i = hr->hrm_index();
5468 InCSetState cset_state = (InCSetState) G1CollectedHeap::heap()->_in_cset_fast_test.get_by_index(i);
5469 if (hr->is_humongous()) {
5470 if (hr->in_collection_set()) {
5471 gclog_or_tty->print_cr("\n## humongous region %u in CSet", i);
5472 _failures = true;
5473 return true;
5474 }
5475 if (cset_state.is_in_cset()) {
5476 gclog_or_tty->print_cr("\n## inconsistent cset state %d for humongous region %u", cset_state.value(), i);
5477 _failures = true;
5478 return true;
5479 }
5480 if (hr->is_continues_humongous() && cset_state.is_humongous()) {
5481 gclog_or_tty->print_cr("\n## inconsistent cset state %d for continues humongous region %u", cset_state.value(), i);
5482 _failures = true;
5483 return true;
5484 }
5485 } else {
5486 if (cset_state.is_humongous()) {
5487 gclog_or_tty->print_cr("\n## inconsistent cset state %d for non-humongous region %u", cset_state.value(), i);
5488 _failures = true;
5489 return true;
5490 }
5491 if (hr->in_collection_set() != cset_state.is_in_cset()) {
5492 gclog_or_tty->print_cr("\n## in CSet %d / cset state %d inconsistency for region %u",
5493 hr->in_collection_set(), cset_state.value(), i);
5494 _failures = true;
5495 return true;
5496 }
5497 if (cset_state.is_in_cset()) {
5498 if (hr->is_young() != (cset_state.is_young())) {
5499 gclog_or_tty->print_cr("\n## is_young %d / cset state %d inconsistency for region %u",
5500 hr->is_young(), cset_state.value(), i);
5501 _failures = true;
5502 return true;
5503 }
5504 if (hr->is_old() != (cset_state.is_old())) {
5505 gclog_or_tty->print_cr("\n## is_old %d / cset state %d inconsistency for region %u",
5506 hr->is_old(), cset_state.value(), i);
5507 _failures = true;
5508 return true;
5509 }
5510 }
5511 }
5512 return false;
5513 }
5514
5515 bool failures() const { return _failures; }
5516 };
5517
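// Cross-checks each region's in_collection_set() flag and region type against
// the per-region _in_cset_fast_test table that the evacuation fast path uses.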
5518 bool G1CollectedHeap::check_cset_fast_test() {
5519 G1CheckCSetFastTableClosure cl;
5520 _hrm.iterate(&cl);
5521 return !cl.failures();
5522 }
5523 #endif // PRODUCT
5524
5525 void G1CollectedHeap::cleanUpCardTable() {
5695 // (i.e. it has "escaped" to an old object) this remembered set entry will stay
5696 // until the end of a concurrent mark.
5697 //
5698 // It is not required to check whether the object has been found dead by marking
5699 // or not, in fact it would prevent reclamation within a concurrent cycle, as
5700 // all objects allocated during that time are considered live.
5701 // SATB marking is even more conservative than the remembered set.
5702 // So if at this point in the collection there is no remembered set entry,
5703 // nobody has a reference to it.
5704 // At the start of collection we flush all refinement logs, and remembered sets
5705     // are completely up-to-date wrt references to the humongous object.
5706 //
5707 // Other implementation considerations:
5708 // - never consider object arrays at this time because they would pose
5709     //   considerable effort for cleaning up the remembered sets. This is
5710 // required because stale remembered sets might reference locations that
5711 // are currently allocated into.
5712 uint region_idx = r->hrm_index();
5713 if (!g1h->is_humongous_reclaim_candidate(region_idx) ||
5714 !r->rem_set()->is_empty()) {
5715
5716 if (G1TraceEagerReclaimHumongousObjects) {
5717 gclog_or_tty->print_cr("Live humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
5718 region_idx,
5719 (size_t)obj->size() * HeapWordSize,
5720 p2i(r->bottom()),
5721 r->rem_set()->occupied(),
5722 r->rem_set()->strong_code_roots_list_length(),
5723 next_bitmap->isMarked(r->bottom()),
5724 g1h->is_humongous_reclaim_candidate(region_idx),
5725 obj->is_typeArray()
5726 );
5727 }
5728
5729 return false;
5730 }
5731
5732 guarantee(obj->is_typeArray(),
5733 "Only eagerly reclaiming type arrays is supported, but the object "
5734 PTR_FORMAT " is not.", p2i(r->bottom()));
5735
5736 if (G1TraceEagerReclaimHumongousObjects) {
5737 gclog_or_tty->print_cr("Dead humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
5738 region_idx,
5739 (size_t)obj->size() * HeapWordSize,
5740 p2i(r->bottom()),
5741 r->rem_set()->occupied(),
5742 r->rem_set()->strong_code_roots_list_length(),
5743 next_bitmap->isMarked(r->bottom()),
5744 g1h->is_humongous_reclaim_candidate(region_idx),
5745 obj->is_typeArray()
5746 );
5747 }
5748 // Need to clear mark bit of the humongous object if already set.
5749 if (next_bitmap->isMarked(r->bottom())) {
5750 next_bitmap->clear(r->bottom());
5751 }
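    // Free the whole humongous object: walk from this "starts humongous" region
    // through its "continues humongous" regions, accounting the freed bytes and
    // returning every region to the supplied free list.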
5752 do {
5753 HeapRegion* next = g1h->next_region_in_humongous(r);
5754 _freed_bytes += r->used();
5755 r->set_containing_set(NULL);
5756 _humongous_regions_removed.increment(1u, r->capacity());
5757 g1h->free_humongous_region(r, _free_region_list, false);
5758 r = next;
5759 } while (r != NULL);
5760
5761 return false;
5762 }
5763
5764 HeapRegionSetCount& humongous_free_count() {
5765 return _humongous_regions_removed;
5766 }
5767
5768 size_t bytes_freed() const {
5769 return _freed_bytes;
5770 }
5771
5772 size_t humongous_reclaimed() const {
5773 return _humongous_regions_removed.length();
5774 }
5775 };
5776
5777 void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
5778 assert_at_safepoint(true);
5779
5780 if (!G1EagerReclaimHumongousObjects ||
5781 (!_has_humongous_reclaim_candidates && !G1TraceEagerReclaimHumongousObjects)) {
5782 g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);
5783 return;
5784 }
5785
5786 double start_time = os::elapsedTime();
5787
5788 FreeRegionList local_cleanup_list("Local Humongous Cleanup List");
5789
5790 G1FreeHumongousRegionClosure cl(&local_cleanup_list);
5791 heap_region_iterate(&cl);
5792
5793 HeapRegionSetCount empty_set;
5794 remove_from_old_sets(empty_set, cl.humongous_free_count());
5795
5796 G1HRPrinter* hrp = hr_printer();
5797 if (hrp->is_active()) {
5798 FreeRegionListIterator iter(&local_cleanup_list);
5799 while (iter.more_available()) {
5800 HeapRegion* hr = iter.get_next();
5801 hrp->cleanup(hr);
5814 // the current incremental collection set in preparation for a
5815 // full collection. After the full GC we will start to build up
5816 // the incremental collection set again.
5817 // This is only called when we're doing a full collection
5818 // and is immediately followed by the tearing down of the young list.
5819
5820 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
5821 HeapRegion* cur = cs_head;
5822
5823 while (cur != NULL) {
5824 HeapRegion* next = cur->next_in_collection_set();
5825 assert(cur->in_collection_set(), "bad CS");
5826 cur->set_next_in_collection_set(NULL);
5827 clear_in_cset(cur);
5828 cur->set_young_index_in_cset(-1);
5829 cur = next;
5830 }
5831 }
5832
5833 void G1CollectedHeap::set_free_regions_coming() {
5834 if (G1ConcRegionFreeingVerbose) {
5835 gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
5836 "setting free regions coming");
5837 }
5838
5839 assert(!free_regions_coming(), "pre-condition");
5840 _free_regions_coming = true;
5841 }
5842
5843 void G1CollectedHeap::reset_free_regions_coming() {
5844 assert(free_regions_coming(), "pre-condition");
5845
5846 {
5847 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
5848 _free_regions_coming = false;
5849 SecondaryFreeList_lock->notify_all();
5850 }
5851
5852 if (G1ConcRegionFreeingVerbose) {
5853 gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
5854 "reset free regions coming");
5855 }
5856 }
5857
5858 void G1CollectedHeap::wait_while_free_regions_coming() {
5859 // Most of the time we won't have to wait, so let's do a quick test
5860 // first before we take the lock.
5861 if (!free_regions_coming()) {
5862 return;
5863 }
5864
5865 if (G1ConcRegionFreeingVerbose) {
5866 gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
5867 "waiting for free regions");
5868 }
5869
5870 {
5871 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
5872 while (free_regions_coming()) {
5873 SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
5874 }
5875 }
5876
5877 if (G1ConcRegionFreeingVerbose) {
5878 gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
5879 "done waiting for free regions");
5880 }
5881 }
5882
5883 bool G1CollectedHeap::is_old_gc_alloc_region(HeapRegion* hr) {
5884 return _allocator->is_retained_old_region(hr);
5885 }
5886
5887 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
5888 _young_list->push_region(hr);
5889 }
5890
5891 class NoYoungRegionsClosure: public HeapRegionClosure {
5892 private:
5893 bool _success;
5894 public:
5895 NoYoungRegionsClosure() : _success(true) { }
5896 bool doHeapRegion(HeapRegion* r) {
5897 if (r->is_young()) {
5898 gclog_or_tty->print_cr("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
5899 p2i(r->bottom()), p2i(r->end()));
5900 _success = false;
5901 }
5902 return false;
5903 }
5904 bool success() { return _success; }
5905 };
5906
5907 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
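  // The young list itself must be empty; with check_heap we additionally walk
  // every region to make sure none is still tagged as young.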
5908 bool ret = _young_list->check_list_empty(check_sample);
5909
5910 if (check_heap) {
5911 NoYoungRegionsClosure closure;
5912 heap_region_iterate(&closure);
5913 ret = ret && closure.success();
5914 }
5915
5916 return ret;
5917 }
5918
6129 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
6130 size_t allocated_bytes,
6131 InCSetState dest) {
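  // A retired GC alloc region either becomes a survivor region (young
  // destination) or joins the old region set; its allocated bytes are credited
  // to the policy as bytes copied during this GC.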
6132 bool during_im = collector_state()->during_initial_mark_pause();
6133 alloc_region->note_end_of_copying(during_im);
6134 g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
6135 if (dest.is_young()) {
6136 young_list()->add_survivor_region(alloc_region);
6137 } else {
6138 _old_set.add(alloc_region);
6139 }
6140 _hr_printer.retire(alloc_region);
6141 }
6142
6143 HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
6144 bool expanded = false;
6145 uint index = _hrm.find_highest_free(&expanded);
6146
6147 if (index != G1_NO_HRM_INDEX) {
6148 if (expanded) {
6149 ergo_verbose1(ErgoHeapSizing,
6150 "attempt heap expansion",
6151 ergo_format_reason("requested address range outside heap bounds")
6152 ergo_format_byte("region size"),
6153 HeapRegion::GrainWords * HeapWordSize);
6154 }
6155 _hrm.allocate_free_regions_starting_at(index, 1);
6156 return region_at(index);
6157 }
6158 return NULL;
6159 }
6160
6161 // Heap region set verification
6162
6163 class VerifyRegionListsClosure : public HeapRegionClosure {
6164 private:
6165 HeapRegionSet* _old_set;
6166 HeapRegionSet* _humongous_set;
6167 HeapRegionManager* _hrm;
6168
6169 public:
6170 HeapRegionSetCount _old_count;
6171 HeapRegionSetCount _humongous_count;
6172 HeapRegionSetCount _free_count;
186 } while (hr != head);
187 assert(hr != NULL, "invariant");
188 hr->set_next_dirty_cards_region(NULL);
189 return hr;
190 }
191
192 // Returns true if the reference points to an object that
193 // can move in an incremental collection.
194 bool G1CollectedHeap::is_scavengable(const void* p) {
195 HeapRegion* hr = heap_region_containing(p);
196 return !hr->is_pinned();
197 }
198
199 // Private methods.
200
201 HeapRegion*
202 G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
203 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
204 while (!_secondary_free_list.is_empty() || free_regions_coming()) {
205 if (!_secondary_free_list.is_empty()) {
206 log_develop(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
207 "secondary_free_list has %u entries",
208 _secondary_free_list.length());
209 // It looks as if there are free regions available on the
210 // secondary_free_list. Let's move them to the free_list and try
211 // again to allocate from it.
212 append_secondary_free_list();
213
214 assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not "
215 "empty we should have moved at least one entry to the free_list");
216 HeapRegion* res = _hrm.allocate_free_region(is_old);
217 log_develop(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
218 "allocated " HR_FORMAT " from secondary_free_list",
219 HR_FORMAT_PARAMS(res));
220 return res;
221 }
222
223 // Wait here until we get notified either when (a) there are no
224 // more free regions coming or (b) some regions have been moved on
225 // the secondary_free_list.
226 SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
227 }
228
229 log_develop(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
230 "could not allocate from secondary_free_list");
231 return NULL;
232 }
233
234 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
235 assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
236 "the only time we use this to allocate a humongous region is "
237 "when we are allocating a single humongous region");
238
239 HeapRegion* res;
240 if (G1StressConcRegionFreeing) {
241 if (!_secondary_free_list.is_empty()) {
242 log_develop(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
243 "forced to look at the secondary_free_list");
244 res = new_region_try_secondary_free_list(is_old);
245 if (res != NULL) {
246 return res;
247 }
248 }
249 }
250
251 res = _hrm.allocate_free_region(is_old);
252
253 if (res == NULL) {
254 log_develop(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
255 "res == NULL, trying the secondary_free_list");
256 res = new_region_try_secondary_free_list(is_old);
257 }
258 if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
259 // Currently, only attempts to allocate GC alloc regions set
260 // do_expand to true. So, we should only reach here during a
261 // safepoint. If this assumption changes we might have to
262 // reconsider the use of _expand_heap_after_alloc_failure.
263 assert(SafepointSynchronize::is_at_safepoint(), "invariant");
264
265 log_debug(gc, ergo, heap)("Attempt heap expansion (region allocation request failed). Allocation request: " SIZE_FORMAT "B",
266 word_size * HeapWordSize);
267
268 if (expand(word_size * HeapWordSize)) {
269 // Given that expand() succeeded in expanding the heap, and we
270 // always expand the heap by an amount aligned to the heap
271 // region size, the free list should in theory not be empty.
272 // In either case allocate_free_region() will check for NULL.
273 res = _hrm.allocate_free_region(is_old);
274 } else {
275 _expand_heap_after_alloc_failure = false;
276 }
277 }
278 return res;
279 }
280
281 HeapWord*
282 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
283 uint num_regions,
284 size_t word_size,
285 AllocationContext_t context) {
286 assert(first != G1_NO_HRM_INDEX, "pre-condition");
287 assert(is_humongous(word_size), "word_size should be humongous");
418 // potentially waits for regions from the secondary free list.
419 wait_while_free_regions_coming();
420 append_secondary_free_list_if_not_empty_with_lock();
421
422 // Policy: Try only empty regions (i.e. already committed first). Maybe we
423 // are lucky enough to find some.
424 first = _hrm.find_contiguous_only_empty(obj_regions);
425 if (first != G1_NO_HRM_INDEX) {
426 _hrm.allocate_free_regions_starting_at(first, obj_regions);
427 }
428 }
429
430 if (first == G1_NO_HRM_INDEX) {
431     // Policy: We could not find enough regions for the humongous object in the
432     // free list. Look through the heap for a contiguous run of free and
433     // uncommitted regions; if one is found, try expansion.
434 first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
435 if (first != G1_NO_HRM_INDEX) {
436 // We found something. Make sure these regions are committed, i.e. expand
437 // the heap. Alternatively we could do a defragmentation GC.
438 log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B",
439 word_size * HeapWordSize);
440
441
442 _hrm.expand_at(first, obj_regions);
443 g1_policy()->record_new_heap_size(num_regions());
444
445 #ifdef ASSERT
446 for (uint i = first; i < first + obj_regions; ++i) {
447 HeapRegion* hr = region_at(i);
448 assert(hr->is_free(), "sanity");
449 assert(hr->is_empty(), "sanity");
450 assert(is_on_master_free_list(hr), "sanity");
451 }
452 #endif
453 _hrm.allocate_free_regions_starting_at(first, obj_regions);
454 } else {
455 // Policy: Potentially trigger a defragmentation GC.
456 }
457 }
458
459 HeapWord* result = NULL;
460 if (first != G1_NO_HRM_INDEX) {
461 result = humongous_obj_allocate_initialize_regions(first, obj_regions,
739 HeapRegion* start_region = _hrm.addr_to_region(start_address);
740 if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
741 start_address = start_region->end();
742 if (start_address > last_address) {
743 increase_used(word_size * HeapWordSize);
744 start_region->set_top(last_address + 1);
745 continue;
746 }
747 start_region->set_top(start_address);
748 curr_range = MemRegion(start_address, last_address + 1);
749 start_region = _hrm.addr_to_region(start_address);
750 }
751
752 // Perform the actual region allocation, exiting if it fails.
753 // Then note how much new space we have allocated.
754 if (!_hrm.allocate_containing_regions(curr_range, &commits)) {
755 return false;
756 }
757 increase_used(word_size * HeapWordSize);
758 if (commits != 0) {
759 log_debug(gc, ergo, heap)("Attempt heap expansion (allocate archive regions). Total size: " SIZE_FORMAT "B",
760 HeapRegion::GrainWords * HeapWordSize * commits);
761
762 }
763
764 // Mark each G1 region touched by the range as archive, add it to the old set,
765 // and set the allocation context and top.
766 HeapRegion* curr_region = _hrm.addr_to_region(start_address);
767 HeapRegion* last_region = _hrm.addr_to_region(last_address);
768 prev_last_region = last_region;
769
770 while (curr_region != NULL) {
771 assert(curr_region->is_empty() && !curr_region->is_pinned(),
772 "Region already in use (index %u)", curr_region->hrm_index());
773 _hr_printer.alloc(curr_region, G1HRPrinter::Archive);
774 curr_region->set_allocation_context(AllocationContext::system());
775 curr_region->set_archive();
776 _old_set.add(curr_region);
777 if (curr_region != last_region) {
778 curr_region->set_top(curr_region->end());
779 curr_region = _hrm.next_region_in_heap(curr_region);
780 } else {
781 curr_region->set_top(last_address + 1);
922 guarantee(curr_region->is_archive(),
923 "Expected archive region at index %u", curr_region->hrm_index());
924 uint curr_index = curr_region->hrm_index();
925 _old_set.remove(curr_region);
926 curr_region->set_free();
927 curr_region->set_top(curr_region->bottom());
928 if (curr_region != last_region) {
929 curr_region = _hrm.next_region_in_heap(curr_region);
930 } else {
931 curr_region = NULL;
932 }
933 _hrm.shrink_at(curr_index, 1);
934 uncommitted_regions++;
935 }
936
937 // Notify mark-sweep that this is no longer an archive range.
938 G1MarkSweep::set_range_archive(ranges[i], false);
939 }
940
941 if (uncommitted_regions != 0) {
942 log_debug(gc, ergo, heap)("Attempt heap shrinking (uncommitted archive regions). Total size: " SIZE_FORMAT "B",
943 HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
944 }
945 decrease_used(size_used);
946 }
947
948 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
949 uint* gc_count_before_ret,
950 uint* gclocker_retry_count_ret) {
951 // The structure of this method has a lot of similarities to
952 // attempt_allocation_slow(). The reason these two were not merged
953 // into a single one is that such a method would require several "if
954 // allocation is not humongous do this, otherwise do that"
955 // conditional paths which would obscure its flow. In fact, an early
956 // version of this code did use a unified method which was harder to
957 // follow and, as a result, it had subtle bugs that were hard to
958 // track down. So keeping these two methods separate allows each to
959 // be more readable. It will be good to keep these two in sync as
960 // much as possible.
961
962 assert_heap_not_locked_and_not_at_safepoint();
1160 // We only generate output for non-empty regions.
1161 } else if (hr->is_starts_humongous()) {
1162 _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
1163 } else if (hr->is_continues_humongous()) {
1164 _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
1165 } else if (hr->is_archive()) {
1166 _hr_printer->post_compaction(hr, G1HRPrinter::Archive);
1167 } else if (hr->is_old()) {
1168 _hr_printer->post_compaction(hr, G1HRPrinter::Old);
1169 } else {
1170 ShouldNotReachHere();
1171 }
1172 return false;
1173 }
1174
1175 PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
1176 : _hr_printer(hr_printer) { }
1177 };
1178
1179 void G1CollectedHeap::print_hrm_post_compaction() {
1180 if (_hr_printer.is_active()) {
1181 PostCompactionPrinterClosure cl(hr_printer());
1182 heap_region_iterate(&cl);
1183 }
1184
1185 }
1186
1187 bool G1CollectedHeap::do_collection(bool explicit_gc,
1188 bool clear_all_soft_refs,
1189 size_t word_size) {
1190 assert_at_safepoint(true /* should_be_vm_thread */);
1191
1192 if (GC_locker::check_active_before_gc()) {
1193 return false;
1194 }
1195
1196 STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
1197 gc_timer->register_gc_start();
1198
1199 SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
1200 GCIdMark gc_id_mark;
1201 gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
1202
1203 SvcGCMarker sgcm(SvcGCMarker::FULL);
1204 ResourceMark rm;
1205
1206 print_heap_before_gc();
1207 trace_heap_before_gc(gc_tracer);
1208
1209 size_t metadata_prev_used = MetaspaceAux::used_bytes();
1210
1211 verify_region_sets_optional();
1212
1213 const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1214 collector_policy()->should_clear_all_soft_refs();
1215
1216 ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
1217
1218 {
1219 IsGCActiveMark x;
1220
1221 // Timing
1222 assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
1223 GCTraceCPUTime tcpu;
1224
1225 {
1226 GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
1227 TraceCollectorStats tcs(g1mm()->full_collection_counters());
1228 TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1229
1230 g1_policy()->record_full_collection_start();
1231
1232 // Note: When we have a more flexible GC logging framework that
1233 // allows us to add optional attributes to a GC log record we
1234 // could consider timing and reporting how long we wait in the
1235 // following two methods.
1236 wait_while_free_regions_coming();
1237 // If we start the compaction before the CM threads finish
1238 // scanning the root regions we might trip them over as we'll
1239 // be moving objects / updating references. So let's wait until
1240 // they are done. By telling them to abort, they should complete
1241 // early.
1242 _cm->root_regions()->abort();
1243 _cm->root_regions()->wait_until_scan_finished();
1244 append_secondary_free_list_if_not_empty_with_lock();
1245
1246 gc_prologue(true);
1257 #if defined(COMPILER2) || INCLUDE_JVMCI
1258 DerivedPointerTable::clear();
1259 #endif
1260
1261 // Disable discovery and empty the discovered lists
1262 // for the CM ref processor.
1263 ref_processor_cm()->disable_discovery();
1264 ref_processor_cm()->abandon_partial_discovery();
1265 ref_processor_cm()->verify_no_references_recorded();
1266
1267 // Abandon current iterations of concurrent marking and concurrent
1268 // refinement, if any are in progress. We have to do this before
1269 // wait_until_scan_finished() below.
1270 concurrent_mark()->abort();
1271
1272 // Make sure we'll choose a new allocation region afterwards.
1273 _allocator->release_mutator_alloc_region();
1274 _allocator->abandon_gc_alloc_regions();
1275 g1_rem_set()->cleanupHRRS();
1276
1277 // We may have added regions to the current incremental collection
1278 // set between the last GC or pause and now. We need to clear the
1279 // incremental collection set and then start rebuilding it afresh
1280 // after this full GC.
1281 abandon_collection_set(g1_policy()->inc_cset_head());
1282 g1_policy()->clear_incremental_cset();
1283 g1_policy()->stop_incremental_cset_building();
1284
1285 tear_down_region_sets(false /* free_list_only */);
1286 collector_state()->set_gcs_are_young(true);
1287
1288 // See the comments in g1CollectedHeap.hpp and
1289 // G1CollectedHeap::ref_processing_init() about
1290 // how reference processing currently works in G1.
1291
1292 // Temporarily make discovery by the STW ref processor single threaded (non-MT).
1293 ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
1294
1295 // Temporarily clear the STW ref processor's _is_alive_non_header field.
1296 ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
1324 ClassLoaderDataGraph::purge();
1325 MetaspaceAux::verify_metrics();
1326
1327 // Note: since we've just done a full GC, concurrent
1328 // marking is no longer active. Therefore we need not
1329 // re-enable reference discovery for the CM ref processor.
1330 // That will be done at the start of the next marking cycle.
1331 assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
1332 ref_processor_cm()->verify_no_references_recorded();
1333
1334 reset_gc_time_stamp();
1335 // Since everything potentially moved, we will clear all remembered
1336 // sets, and clear all cards. Later we will rebuild remembered
1337 // sets. We will also reset the GC time stamps of the regions.
1338 clear_rsets_post_compaction();
1339 check_gc_time_stamps();
1340
1341 // Resize the heap if necessary.
1342 resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
1343
1344 // We should do this after we potentially resize the heap so
1345 // that all the COMMIT / UNCOMMIT events are generated before
1346 // the compaction events.
1347 print_hrm_post_compaction();
1348
1349 G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
1350 if (hot_card_cache->use_cache()) {
1351 hot_card_cache->reset_card_counts();
1352 hot_card_cache->reset_hot_cache();
1353 }
1354
1355 // Rebuild remembered sets of all regions.
1356 uint n_workers =
1357 AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
1358 workers()->active_workers(),
1359 Threads::number_of_non_daemon_threads());
1360 workers()->set_active_workers(n_workers);
1361
1362 ParRebuildRSTask rebuild_rs_task(this);
1363 workers()->run_task(&rebuild_rs_task);
1364
1365 // Rebuild the strong code root lists for each region
1366 rebuild_strong_code_roots();
1367
1396 // ConcurrentMark::abort() above since VerifyDuringGC verifies the
1397 // objects marked during a full GC against the previous bitmap.
1398 // But we need to clear it before calling check_bitmaps below since
1399 // the full GC has compacted objects and updated TAMS but not updated
1400 // the prev bitmap.
1401 if (G1VerifyBitmaps) {
1402 ((CMBitMap*) concurrent_mark()->prevMarkBitMap())->clearAll();
1403 }
1404 check_bitmaps("Full GC End");
1405
1406 // Start a new incremental collection set for the next pause
1407 assert(g1_policy()->collection_set() == NULL, "must be");
1408 g1_policy()->start_incremental_cset_building();
1409
1410 clear_cset_fast_test();
1411
1412 _allocator->init_mutator_alloc_region();
1413
1414 g1_policy()->record_full_collection_end();
1415
1416 // We must call G1MonitoringSupport::update_sizes() in the same scoping level
1417 // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
1418 // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
1419 // before any GC notifications are raised.
1420 g1mm()->update_sizes();
1421
1422 gc_epilogue(true);
1423 }
1424
1425 g1_policy()->print_detailed_heap_transition();
1426
1427 print_heap_after_gc();
1428 trace_heap_after_gc(gc_tracer);
1429
1430 post_full_gc_dump(gc_timer);
1431
1432 gc_timer->register_gc_end();
1433 gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1434 }
1435
1436 return true;
1437 }
1438
1439 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1440 // do_collection() will return whether it succeeded in performing
1441 // the GC. Currently, there is no facility on the
1442 // do_full_collection() API to notify the caller that the collection
1443 // did not succeed (e.g., because it was locked out by the GC
1444 // locker). So, right now, we'll ignore the return value.
1445 bool dummy = do_collection(true, /* explicit_gc */
1490
1491 // This assert only makes sense here, before we adjust them
1492 // with respect to the min and max heap size.
1493 assert(minimum_desired_capacity <= maximum_desired_capacity,
1494 "minimum_desired_capacity = " SIZE_FORMAT ", "
1495 "maximum_desired_capacity = " SIZE_FORMAT,
1496 minimum_desired_capacity, maximum_desired_capacity);
1497
1498 // Should not be greater than the heap max size. No need to adjust
1499 // it with respect to the heap min size as it's a lower bound (i.e.,
1500 // we'll try to make the capacity larger than it, not smaller).
1501 minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
1502 // Should not be less than the heap min size. No need to adjust it
1503 // with respect to the heap max size as it's an upper bound (i.e.,
1504 // we'll try to make the capacity smaller than it, not greater).
1505 maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size);
1506
1507 if (capacity_after_gc < minimum_desired_capacity) {
1508 // Don't expand unless it's significant
1509 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
1510
1511 log_debug(gc, ergo, heap)("Attempt heap expansion (capacity lower than min desired capacity after Full GC). "
1512 "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B min_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
1513 capacity_after_gc, used_after_gc, minimum_desired_capacity, MinHeapFreeRatio);
1514
1515 expand(expand_bytes);
1516
1517 // No expansion, now see if we want to shrink
1518 } else if (capacity_after_gc > maximum_desired_capacity) {
1519 // Capacity too large, compute shrinking size
1520 size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
1521
1522 log_debug(gc, ergo, heap)("Attempt heap shrinking (capacity higher than max desired capacity after Full GC). "
1523                               "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B max_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
1524                               capacity_after_gc, used_after_gc, maximum_desired_capacity, MaxHeapFreeRatio);
1525
1526 shrink(shrink_bytes);
1527 }
1528 }
1529
1530 HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
1531 AllocationContext_t context,
1532 bool do_gc,
1533 bool clear_all_soft_refs,
1534 bool expect_null_mutator_alloc_region,
1535 bool* gc_succeeded) {
1536 *gc_succeeded = true;
1537 // Let's attempt the allocation first.
1538 HeapWord* result =
1539 attempt_allocation_at_safepoint(word_size,
1540 context,
1541 expect_null_mutator_alloc_region);
1542 if (result != NULL) {
1543 assert(*gc_succeeded, "sanity");
1544 return result;
1545 }
1612
1613 // What else? We might try synchronous finalization later. If the total
1614 // space available is large enough for the allocation, then a more
1615 // complete compaction phase than we've tried so far might be
1616 // appropriate.
1617 assert(*succeeded, "sanity");
1618 return NULL;
1619 }
1620
1621 // Attempt to expand the heap sufficiently
1622 // to support an allocation of the given "word_size". If
1623 // successful, perform the allocation and return the address of the
1624 // allocated block, or else "NULL".
1625
1626 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) {
1627 assert_at_safepoint(true /* should_be_vm_thread */);
1628
1629 verify_region_sets_optional();
1630
1631 size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1632 log_debug(gc, ergo, heap)("Attempt heap expansion (allocation request failed). Allocation request: " SIZE_FORMAT "B",
1633 word_size * HeapWordSize);
1634
1635
1636 if (expand(expand_bytes)) {
1637 _hrm.verify_optional();
1638 verify_region_sets_optional();
1639 return attempt_allocation_at_safepoint(word_size,
1640 context,
1641 false /* expect_null_mutator_alloc_region */);
1642 }
1643 return NULL;
1644 }
1645
1646 bool G1CollectedHeap::expand(size_t expand_bytes, double* expand_time_ms) {
1647 size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1648 aligned_expand_bytes = align_size_up(aligned_expand_bytes,
1649 HeapRegion::GrainBytes);
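  // The request is rounded up first to the page size of the underlying
  // reservation and then to whole heap regions, since the heap can only be
  // expanded in units of HeapRegion::GrainBytes.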
1650
1651 log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount:" SIZE_FORMAT "B expansion amount:" SIZE_FORMAT "B",
1652 expand_bytes, aligned_expand_bytes);
1653
1654 if (is_maximal_no_gc()) {
1655 log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
1656 return false;
1657 }
1658
1659 double expand_heap_start_time_sec = os::elapsedTime();
1660 uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
1661 assert(regions_to_expand > 0, "Must expand by at least one region");
1662
1663 uint expanded_by = _hrm.expand_by(regions_to_expand);
1664 if (expand_time_ms != NULL) {
1665 *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
1666 }
1667
1668 if (expanded_by > 0) {
1669 size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
1670 assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1671 g1_policy()->record_new_heap_size(num_regions());
1672 } else {
1673 log_debug(gc, ergo, heap)("Did not expand the heap (heap expansion operation failed)");
1674
1675 // The expansion of the virtual storage space was unsuccessful.
1676 // Let's see if it was because we ran out of swap.
1677 if (G1ExitOnExpansionFailure &&
1678 _hrm.available() >= regions_to_expand) {
1679 // We had head room...
1680 vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
1681 }
1682 }
1683 return regions_to_expand > 0;
1684 }
1685
1686 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1687 size_t aligned_shrink_bytes =
1688 ReservedSpace::page_align_size_down(shrink_bytes);
1689 aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
1690 HeapRegion::GrainBytes);
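  // Shrink requests are aligned down the same way expansion requests are
  // aligned up: to the page size of the reservation and then to whole heap
  // regions, so the heap is only ever resized in units of HeapRegion::GrainBytes.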
1691 uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
1692
1693 uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
1694 size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
1695
1696
1697 log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",
1698 shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
1699 if (num_regions_removed > 0) {
1700 g1_policy()->record_new_heap_size(num_regions());
1701 } else {
1702     log_debug(gc, ergo, heap)("Did not shrink the heap (heap shrinking operation failed)");
1703 }
1704 }
1705
1706 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1707 verify_region_sets_optional();
1708
1709 // We should only reach here at the end of a Full GC which means we
1710   // should not be holding on to any GC alloc regions. The method
1711 // below will make sure of that and do any remaining clean up.
1712 _allocator->abandon_gc_alloc_regions();
1713
1714 // Instead of tearing down / rebuilding the free lists here, we
1715 // could instead use the remove_all_pending() method on free_list to
1716 // remove only the ones that we need to remove.
1717 tear_down_region_sets(true /* free_list_only */);
1718 shrink_helper(shrink_bytes);
1719 rebuild_region_sets(true /* free_list_only */);
1720
1721 _hrm.verify_optional();
1722 verify_region_sets_optional();
1794 // Initialize the G1EvacuationFailureALot counters and flags.
1795 NOT_PRODUCT(reset_evacuation_should_fail();)
1796
1797 guarantee(_task_queues != NULL, "task_queues allocation failure.");
1798 }
1799
1800 G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description,
1801 size_t size,
1802 size_t translation_factor) {
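  // Sets up backing storage for an auxiliary GC data structure (e.g. a marking
  // bitmap or the card table). The mapper commits this memory in step with heap
  // region commits; "translation_factor" gives the heap-space-to-data-space ratio.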
1803 size_t preferred_page_size = os::page_size_for_region_unaligned(size, 1);
1804 // Allocate a new reserved space, preferring to use large pages.
1805 ReservedSpace rs(size, preferred_page_size);
1806 G1RegionToSpaceMapper* result =
1807 G1RegionToSpaceMapper::create_mapper(rs,
1808 size,
1809 rs.alignment(),
1810 HeapRegion::GrainBytes,
1811 translation_factor,
1812 mtGC);
1813 if (TracePageSizes) {
1814 tty->print_cr("G1 '%s': pg_sz=" SIZE_FORMAT " base=" PTR_FORMAT " size=" SIZE_FORMAT " alignment=" SIZE_FORMAT " reqsize=" SIZE_FORMAT,
1815 description, preferred_page_size, p2i(rs.base()), rs.size(), rs.alignment(), size);
1816 }
1817 return result;
1818 }
1819
1820 jint G1CollectedHeap::initialize() {
1821 CollectedHeap::pre_initialize();
1822 os::enable_vtime();
1823
1824 // Necessary to satisfy locking discipline assertions.
1825
1826 MutexLocker x(Heap_lock);
1827
1828 // While there are no constraints in the GC code that HeapWordSize
1829 // be any particular value, there are multiple other areas in the
1830 // system which believe this to be true (e.g. oop->object_size in some
1831 // cases incorrectly returns the size in wordSize units rather than
1832 // HeapWordSize).
1833 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1834
1835 size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1836 size_t max_byte_size = collector_policy()->max_heap_byte_size();
1837 size_t heap_alignment = collector_policy()->heap_alignment();
1838
1839 // Ensure that the sizes are properly aligned.
1840 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1841 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1842 Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
1843
1844 _refine_cte_cl = new RefineCardTableEntryClosure();
1845
1846 jint ecode = JNI_OK;
1847 _cg1r = ConcurrentG1Refine::create(this, _refine_cte_cl, &ecode);
2000 G1AllocRegion::setup(this, dummy_region);
2001
2002 _allocator->init_mutator_alloc_region();
2003
2004   // Create the monitoring and management support now so that the
2005   // values in the heap have been properly initialized.
2006 _g1mm = new G1MonitoringSupport(this);
2007
2008 G1StringDedup::initialize();
2009
2010 _preserved_objs = NEW_C_HEAP_ARRAY(OopAndMarkOopStack, ParallelGCThreads, mtGC);
2011 for (uint i = 0; i < ParallelGCThreads; i++) {
2012 new (&_preserved_objs[i]) OopAndMarkOopStack();
2013 }
2014
2015 return JNI_OK;
2016 }
2017
2018 void G1CollectedHeap::stop() {
2019 // Stop all concurrent threads. We do this to make sure these threads
2020 // do not continue to execute and access resources (e.g. logging)
2021 // that are destroyed during shutdown.
2022 _cg1r->stop();
2023 _cmThread->stop();
2024 if (G1StringDedup::is_enabled()) {
2025 G1StringDedup::stop();
2026 }
2027 }
2028
2029 size_t G1CollectedHeap::conservative_max_heap_alignment() {
2030 return HeapRegion::max_region_size();
2031 }
2032
2033 void G1CollectedHeap::post_initialize() {
2034 CollectedHeap::post_initialize();
2035 ref_processing_init();
2036 }
2037
2038 void G1CollectedHeap::ref_processing_init() {
2039 // Reference processing in G1 currently works as follows:
2040 //
2117 }
2118
2119 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
2120 hr->reset_gc_time_stamp();
2121 }
2122
2123 #ifndef PRODUCT
2124
2125 class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
2126 private:
2127 unsigned _gc_time_stamp;
2128 bool _failures;
2129
2130 public:
2131 CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
2132 _gc_time_stamp(gc_time_stamp), _failures(false) { }
2133
2134 virtual bool doHeapRegion(HeapRegion* hr) {
2135 unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
2136 if (_gc_time_stamp != region_gc_time_stamp) {
2137 log_info(gc, verify)("Region " HR_FORMAT " has GC time stamp = %d, expected %d", HR_FORMAT_PARAMS(hr),
2138 region_gc_time_stamp, _gc_time_stamp);
2139 _failures = true;
2140 }
2141 return false;
2142 }
2143
2144 bool failures() { return _failures; }
2145 };
2146
2147 void G1CollectedHeap::check_gc_time_stamps() {
2148 CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
2149 heap_region_iterate(&cl);
2150 guarantee(!cl.failures(), "all GC time stamps should have been reset");
2151 }
2152 #endif // PRODUCT
2153
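// Apply the given card table entry closure to all cards currently buffered
// in the hot card cache, on behalf of worker worker_i.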
2154 void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
2155 _cg1r->hot_card_cache()->drain(cl, worker_i);
2156 }
2157
2705 private:
2706 G1CollectedHeap* _g1h;
2707 VerifyOption _vo;
2708 bool _failures;
2709 public:
2710 // _vo == UsePrevMarking -> use "prev" marking information,
2711 // _vo == UseNextMarking -> use "next" marking information,
2712 // _vo == UseMarkWord -> use mark word from object header.
2713 VerifyRootsClosure(VerifyOption vo) :
2714 _g1h(G1CollectedHeap::heap()),
2715 _vo(vo),
2716 _failures(false) { }
2717
2718 bool failures() { return _failures; }
2719
2720 template <class T> void do_oop_nv(T* p) {
2721 T heap_oop = oopDesc::load_heap_oop(p);
2722 if (!oopDesc::is_null(heap_oop)) {
2723 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2724 if (_g1h->is_obj_dead_cond(obj, _vo)) {
2725 LogHandle(gc, verify) log;
2726 log.info("Root location " PTR_FORMAT " points to dead obj " PTR_FORMAT, p2i(p), p2i(obj));
2727 if (_vo == VerifyOption_G1UseMarkWord) {
2728 log.info(" Mark word: " INTPTR_FORMAT, (intptr_t)obj->mark());
2729 }
2730 ResourceMark rm;
2731 obj->print_on(log.info_stream());
2732 _failures = true;
2733 }
2734 }
2735 }
2736
2737 void do_oop(oop* p) { do_oop_nv(p); }
2738 void do_oop(narrowOop* p) { do_oop_nv(p); }
2739 };
2740
2741 class G1VerifyCodeRootOopClosure: public OopClosure {
2742 G1CollectedHeap* _g1h;
2743 OopClosure* _root_cl;
2744 nmethod* _nm;
2745 VerifyOption _vo;
2746 bool _failures;
2747
2748 template <class T> void do_oop_work(T* p) {
2749 // First verify that this root is live
2750 _root_cl->do_oop(p);
2751
2756
2757 // Don't check the code roots during marking verification in a full GC
2758 if (_vo == VerifyOption_G1UseMarkWord) {
2759 return;
2760 }
2761
2762 // Now verify that the current nmethod (which contains p) is
2763 // in the code root list of the heap region containing the
2764 // object referenced by p.
2765
2766 T heap_oop = oopDesc::load_heap_oop(p);
2767 if (!oopDesc::is_null(heap_oop)) {
2768 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2769
2770 // Now fetch the region containing the object
2771 HeapRegion* hr = _g1h->heap_region_containing(obj);
2772 HeapRegionRemSet* hrrs = hr->rem_set();
2773 // Verify that the strong code root list for this region
2774 // contains the nmethod
2775 if (!hrrs->strong_code_roots_list_contains(_nm)) {
2776 log_info(gc, verify)("Code root location " PTR_FORMAT " "
2777 "from nmethod " PTR_FORMAT " not in strong "
2778 "code roots for region [" PTR_FORMAT "," PTR_FORMAT ")",
2779 p2i(p), p2i(_nm), p2i(hr->bottom()), p2i(hr->end()));
2780 _failures = true;
2781 }
2782 }
2783 }
2784
2785 public:
2786 G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo):
2787 _g1h(g1h), _root_cl(root_cl), _vo(vo), _nm(NULL), _failures(false) {}
2788
2789 void do_oop(oop* p) { do_oop_work(p); }
2790 void do_oop(narrowOop* p) { do_oop_work(p); }
2791
2792 void set_nmethod(nmethod* nm) { _nm = nm; }
2793 bool failures() { return _failures; }
2794 };
2795
2796 class G1VerifyCodeRootBlobClosure: public CodeBlobClosure {
2937 }
2938
2939 bool doHeapRegion(HeapRegion* r) {
2940 // For archive regions, verify there are no heap pointers to
2941 // non-pinned regions. For all others, verify liveness info.
2942 if (r->is_archive()) {
2943 VerifyArchiveRegionClosure verify_oop_pointers(r);
2944 r->object_iterate(&verify_oop_pointers);
2945 return true;
2946 }
2947 if (!r->is_continues_humongous()) {
2948 bool failures = false;
2949 r->verify(_vo, &failures);
2950 if (failures) {
2951 _failures = true;
2952 } else if (!r->is_starts_humongous()) {
2953 VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
2954 r->object_iterate(&not_dead_yet_cl);
2955 if (_vo != VerifyOption_G1UseNextMarking) {
2956 if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
2957 log_info(gc, verify)("[" PTR_FORMAT "," PTR_FORMAT "] max_live_bytes " SIZE_FORMAT " < calculated " SIZE_FORMAT,
2958 p2i(r->bottom()), p2i(r->end()), r->max_live_bytes(), not_dead_yet_cl.live_bytes());
2959 _failures = true;
2960 }
2961 } else {
2962 // When vo == UseNextMarking we cannot currently do a sanity
2963 // check on the live bytes as the calculation has not been
2964 // finalized yet.
2965 }
2966 }
2967 }
2968 return false; // continue the region iteration; any failures have been recorded in _failures
2969 }
2970 };
2971
2972 // This is the task used for parallel verification of the heap regions
2973
2974 class G1ParVerifyTask: public AbstractGangTask {
2975 private:
2976 G1CollectedHeap* _g1h;
2977 VerifyOption _vo;
2978 bool _failures;
2986 AbstractGangTask("Parallel verify task"),
2987 _g1h(g1h),
2988 _vo(vo),
2989 _failures(false),
2990 _hrclaimer(g1h->workers()->active_workers()) {}
2991
2992 bool failures() {
2993 return _failures;
2994 }
2995
2996 void work(uint worker_id) {
2997 HandleMark hm;
2998 VerifyRegionClosure blk(true, _vo);
2999 _g1h->heap_region_par_iterate(&blk, worker_id, &_hrclaimer);
3000 if (blk.failures()) {
3001 _failures = true;
3002 }
3003 }
3004 };
3005
3006 void G1CollectedHeap::verify(VerifyOption vo) {
3007 if (!SafepointSynchronize::is_at_safepoint()) {
3008 log_info(gc, verify)("Skipping verification. Not at safepoint.");
3009 }
3010
3011 assert(Thread::current()->is_VM_thread(),
3012 "Expected to be executed serially by the VM thread at this point");
3013
3014 log_debug(gc, verify)("Roots");
3015 VerifyRootsClosure rootsCl(vo);
3016 VerifyKlassClosure klassCl(this, &rootsCl);
3017 CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);
3018
3019 // We apply the relevant closures to all the oops in the
3020 // system dictionary, class loader data graph, the string table
3021 // and the nmethods in the code cache.
3022 G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
3023 G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
3024
3025 {
3026 G1RootProcessor root_processor(this, 1);
3027 root_processor.process_all_roots(&rootsCl,
3028 &cldCl,
3029 &blobsCl);
3030 }
3031
3032 bool failures = rootsCl.failures() || codeRootsCl.failures();
3033
3034 if (vo != VerifyOption_G1UseMarkWord) {
3035 // If we're verifying during a full GC then the region sets
3036 // will have been torn down at the start of the GC. Therefore
3037 // verifying the region sets will fail. So we only verify
3038 // the region sets when not in a full GC.
3039 log_debug(gc, verify)("HeapRegionSets");
3040 verify_region_sets();
3041 }
3042
3043 log_debug(gc, verify)("HeapRegions");
3044 if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
3045
3046 G1ParVerifyTask task(this, vo);
3047 workers()->run_task(&task);
3048 if (task.failures()) {
3049 failures = true;
3050 }
3051
3052 } else {
3053 VerifyRegionClosure blk(false, vo);
3054 heap_region_iterate(&blk);
3055 if (blk.failures()) {
3056 failures = true;
3057 }
3058 }
3059
3060 if (G1StringDedup::is_enabled()) {
3061 log_debug(gc, verify)("StrDedup");
3062 G1StringDedup::verify();
3063 }
3064
3065 if (failures) {
3066 log_info(gc, verify)("Heap after failed verification:");
3067 // It helps to have the per-region information in the output to
3068 // help us track down what went wrong. This is why we call
3069 // print_extended_on() instead of print_on().
3070 LogHandle(gc, verify) log;
3071 ResourceMark rm;
3072 print_extended_on(log.info_stream());
3073 }
3074 guarantee(!failures, "there should not have been any failures");
3075 }
3076
3077 double G1CollectedHeap::verify(bool guard, const char* msg) {
3078 double verify_time_ms = 0.0;
3079
3080 if (guard && total_collections() >= VerifyGCStartAt) {
3081 double verify_start = os::elapsedTime();
3082 HandleMark hm; // Discard invalid handles created during verification
3083 prepare_for_verify();
3084 Universe::verify(VerifyOption_G1UsePrevMarking, msg);
3085 verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
3086 }
3087
3088 return verify_time_ms;
3089 }
3090
3091 void G1CollectedHeap::verify_before_gc() {
3092 double verify_time_ms = verify(VerifyBeforeGC, "Before GC");
3093 g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
3094 }
3095
3096 void G1CollectedHeap::verify_after_gc() {
3097 double verify_time_ms = verify(VerifyAfterGC, "After GC");
3098 g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
3099 }
3100
3101 class PrintRegionClosure: public HeapRegionClosure {
3102 outputStream* _st;
3103 public:
3104 PrintRegionClosure(outputStream* st) : _st(st) {}
3105 bool doHeapRegion(HeapRegion* r) {
3106 r->print_on(_st);
3107 return false;
3108 }
3109 };
3110
3111 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
3112 const HeapRegion* hr,
3113 const VerifyOption vo) const {
3114 switch (vo) {
3115 case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
3116 case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
3117 case VerifyOption_G1UseMarkWord: return !obj->is_gc_marked() && !hr->is_archive();
3187 G1StringDedup::print_worker_threads_on(st);
3188 }
3189 }
3190
3191 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
3192 workers()->threads_do(tc);
3193 tc->do_thread(_cmThread);
3194 _cg1r->threads_do(tc);
3195 if (G1StringDedup::is_enabled()) {
3196 G1StringDedup::threads_do(tc);
3197 }
3198 }
3199
3200 void G1CollectedHeap::print_tracing_info() const {
3201 // We'll overload this to mean "trace GC pause statistics."
3202 if (TraceYoungGenTime || TraceOldGenTime) {
3203 // The "G1CollectorPolicy" is keeping track of these stats, so delegate
3204 // to that.
3205 g1_policy()->print_tracing_info();
3206 }
3207 g1_rem_set()->print_summary_info();
3208 concurrent_mark()->print_summary_info();
3209 g1_policy()->print_yg_surv_rate_info();
3210 }
3211
3212 #ifndef PRODUCT
3213 // Helpful for debugging RSet issues.
3214
3215 class PrintRSetsClosure : public HeapRegionClosure {
3216 private:
3217 const char* _msg;
3218 size_t _occupied_sum;
3219
3220 public:
3221 bool doHeapRegion(HeapRegion* r) {
3222 HeapRegionRemSet* hrrs = r->rem_set();
3223 size_t occupied = hrrs->occupied();
3224 _occupied_sum += occupied;
3225
3226 tty->print_cr("Printing RSet for region " HR_FORMAT,
3227 HR_FORMAT_PARAMS(r));
3228 if (occupied == 0) {
3229 tty->print_cr(" RSet is empty");
3230 } else {
3231 hrrs->print();
3232 }
3233 tty->print_cr("----------");
3234 return false;
3235 }
3236
3237 PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
3238 tty->cr();
3239 tty->print_cr("========================================");
3240 tty->print_cr("%s", msg);
3241 tty->cr();
3242 }
3243
3244 ~PrintRSetsClosure() {
3245 tty->print_cr("Occupied Sum: " SIZE_FORMAT, _occupied_sum);
3246 tty->print_cr("========================================");
3247 tty->cr();
3248 }
3249 };
3250
3251 void G1CollectedHeap::print_cset_rsets() {
3252 PrintRSetsClosure cl("Printing CSet RSets");
3253 collection_set_iterate(&cl);
3254 }
3255
3256 void G1CollectedHeap::print_all_rsets() {
3257 PrintRSetsClosure cl("Printing All RSets");
3258 heap_region_iterate(&cl);
3259 }
3260 #endif // PRODUCT
3261
3262 G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
3263 YoungList* young_list = heap()->young_list();
3264
3265 size_t eden_used_bytes = young_list->eden_used_bytes();
3266 size_t survivor_used_bytes = young_list->survivor_used_bytes();
3267
3285
3286 const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
3287 gc_tracer->report_metaspace_summary(when, metaspace_summary);
3288 }
3289
3290
3291 G1CollectedHeap* G1CollectedHeap::heap() {
3292 CollectedHeap* heap = Universe::heap();
3293 assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
3294 assert(heap->kind() == CollectedHeap::G1CollectedHeap, "Not a G1CollectedHeap");
3295 return (G1CollectedHeap*)heap;
3296 }
3297
3298 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
3299 // always_do_update_barrier = false;
3300 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
3301 // Fill TLAB's and such
3302 accumulate_statistics_all_tlabs();
3303 ensure_parsability(true);
3304
3305 g1_rem_set()->print_periodic_summary_info("Before GC RS summary", total_collections());
3306 }
3307
3308 void G1CollectedHeap::gc_epilogue(bool full) {
3309 // We are at the end of the GC. The total collection count has already been incremented.
3310 g1_rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);
3311
3312 // FIXME: what is this about?
3313 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
3314 // is set.
3315 #if defined(COMPILER2) || INCLUDE_JVMCI
3316 assert(DerivedPointerTable::is_empty(), "derived pointer present");
3317 #endif
3318 // always_do_update_barrier = true;
3319
3320 resize_all_tlabs();
3321 allocation_context_stats().update(full);
3322
3323 // We have just completed a GC. Update the soft reference
3324 // policy with the new heap occupancy
3325 Universe::update_heap_info_at_gc();
3326 }
3327
3328 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
3329 uint gc_count_before,
3330 bool* succeeded,
3536 guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification");
3537
3538 // Here's a good place to add any other checks we'd like to
3539 // perform on CSet regions.
3540 return false;
3541 }
3542 };
3543 #endif // ASSERT
3544
3545 uint G1CollectedHeap::num_task_queues() const {
3546 return _task_queues->size();
3547 }
3548
3549 #if TASKQUEUE_STATS
3550 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
3551 st->print_raw_cr("GC Task Stats");
3552 st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
3553 st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
3554 }
3555
3556 void G1CollectedHeap::print_taskqueue_stats() const {
3557 LogHandle(gc, task, stats) log;
3558 if (!log.is_develop()) {
3559 return;
3560 }
3561 ResourceMark rm;
3562 outputStream* st = log.develop_stream();
3563
3564 print_taskqueue_stats_hdr(st);
3565
3566 TaskQueueStats totals;
3567 const uint n = num_task_queues();
3568 for (uint i = 0; i < n; ++i) {
3569 st->print("%3u ", i); task_queue(i)->stats.print(st); st->cr();
3570 totals += task_queue(i)->stats;
3571 }
3572 st->print_raw("tot "); totals.print(st); st->cr();
3573
3574 DEBUG_ONLY(totals.verify());
3575 }
3576
3577 void G1CollectedHeap::reset_taskqueue_stats() {
3578 const uint n = num_task_queues();
3579 for (uint i = 0; i < n; ++i) {
3580 task_queue(i)->stats.reset();
3581 }
3582 }
3583 #endif // TASKQUEUE_STATS
3584
3585 void G1CollectedHeap::log_gc_footer(double pause_time_counter) {
3586 if (evacuation_failed()) {
3587 log_info(gc)("To-space exhausted");
3588 }
3589
3590 double pause_time_sec = TimeHelper::counter_to_seconds(pause_time_counter);
3591 g1_policy()->print_phases(pause_time_sec);
3592
3593 g1_policy()->print_detailed_heap_transition();
3594 }
3595
3596
3597 void G1CollectedHeap::wait_for_root_region_scanning() {
3598 double scan_wait_start = os::elapsedTime();
3599 // We have to wait until the CM threads finish scanning the
3600 // root regions as it's the only way to ensure that all the
3601 // objects on them have been correctly scanned before we start
3602 // moving them during the GC.
3603 bool waited = _cm->root_regions()->wait_until_scan_finished();
3604 double wait_time_ms = 0.0;
3605 if (waited) {
3606 double scan_wait_end = os::elapsedTime();
3607 wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
3608 }
3609 g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
3610 }
3611
3612 bool
3613 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3614 assert_at_safepoint(true /* should_be_vm_thread */);
3615 guarantee(!is_gc_active(), "collection is not reentrant");
3616
3617 if (GC_locker::check_active_before_gc()) {
3618 return false;
3619 }
3620
3621 _gc_timer_stw->register_gc_start();
3622
3623 GCIdMark gc_id_mark;
3624 _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3625
3626 SvcGCMarker sgcm(SvcGCMarker::MINOR);
3627 ResourceMark rm;
3628
3629 wait_for_root_region_scanning();
3630
3631 print_heap_before_gc();
3632 trace_heap_before_gc(_gc_tracer_stw);
3633
3634 verify_region_sets_optional();
3635 verify_dirty_young_regions();
3636
3637 // This call will decide whether this pause is an initial-mark
3638 // pause. If it is, during_initial_mark_pause() will return true
3639 // for the duration of this pause.
3640 g1_policy()->decide_on_conc_mark_initiation();
3641
3642 // We do not allow initial-mark to be piggy-backed on a mixed GC.
3643 assert(!collector_state()->during_initial_mark_pause() ||
3644 collector_state()->gcs_are_young(), "sanity");
3645
3646 // We also do not allow mixed GCs during marking.
3647 assert(!collector_state()->mark_in_progress() || collector_state()->gcs_are_young(), "sanity");
3648
3649 // Record whether this pause is an initial mark. When the current
3650 // thread has completed its logging output and it's safe to signal
3651 // the CM thread, the flag's value in the policy has been reset.
3652 bool should_start_conc_mark = collector_state()->during_initial_mark_pause();
3653
3654 // Inner scope for scope based logging, timers, and stats collection
3655 {
3656 EvacuationInfo evacuation_info;
3657
3658 if (collector_state()->during_initial_mark_pause()) {
3659 // We are about to start a marking cycle, so we increment the
3660 // full collection counter.
3661 increment_old_marking_cycles_started();
3662 register_concurrent_cycle_start(_gc_timer_stw->gc_start());
3663 }
3664
3665 _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
3666
3667 GCTraceCPUTime tcpu;
3668
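// Recompute the number of GC worker threads to use for this pause from the
// total and currently active workers and the number of non-daemon Java threads.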
3669 uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
3670 workers()->active_workers(),
3671 Threads::number_of_non_daemon_threads());
3672 workers()->set_active_workers(active_workers);
3673 FormatBuffer<> gc_string("Pause ");
3674 if (collector_state()->during_initial_mark_pause()) {
3675 gc_string.append("Initial Mark");
3676 } else if (collector_state()->gcs_are_young()) {
3677 gc_string.append("Young");
3678 } else {
3679 gc_string.append("Mixed");
3680 }
3681 GCTraceTime(Info, gc) tm5(gc_string, NULL, gc_cause(), true);
3682
3683
3684 double pause_start_sec = os::elapsedTime();
3685 double pause_start_counter = os::elapsed_counter();
3686 g1_policy()->note_gc_start(active_workers);
3687
3688 TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3689 TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3690
3691 // If the secondary_free_list is not empty, append it to the
3692 // free_list. No need to wait for the cleanup operation to finish;
3693 // the region allocation code will check the secondary_free_list
3694 // and wait if necessary. If the G1StressConcRegionFreeing flag is
3695 // set, skip this step so that the region allocation code has to
3696 // get entries from the secondary_free_list.
3697 if (!G1StressConcRegionFreeing) {
3698 append_secondary_free_list_if_not_empty_with_lock();
3699 }
3700
3701 assert(check_young_list_well_formed(), "young list should be well formed");
3702
3703 // Don't dynamically change the number of GC threads this early. A value of
3704 // 0 is used to indicate serial work. When parallel work is done,
3705 // it will be set.
3706
3720 #endif
3721
3722 // Please see comment in g1CollectedHeap.hpp and
3723 // G1CollectedHeap::ref_processing_init() to see how
3724 // reference processing currently works in G1.
3725
3726 // Enable discovery in the STW reference processor
3727 ref_processor_stw()->enable_discovery();
3728
3729 {
3730 // We want to temporarily turn off discovery by the
3731 // CM ref processor, if necessary, and turn it back on
3732 // again later if we do. Using a scoped
3733 // NoRefDiscovery object will do this.
3734 NoRefDiscovery no_cm_discovery(ref_processor_cm());
3735
3736 // Forget the current alloc region (we might even choose it to be part
3737 // of the collection set!).
3738 _allocator->release_mutator_alloc_region();
3739
3740 // This timing is only used by the ergonomics to handle our pause target.
3741 // It is unclear why this should not include the full pause. We will
3742 // investigate this in CR 7178365.
3743 //
3744 // Preserving the old comment here if that helps the investigation:
3745 //
3746 // The elapsed time induced by the start time below deliberately elides
3747 // the possible verification above.
3748 double sample_start_time_sec = os::elapsedTime();
3749
3750 g1_policy()->record_collection_pause_start(sample_start_time_sec);
3751
3752 if (collector_state()->during_initial_mark_pause()) {
3753 concurrent_mark()->checkpointRootsInitialPre();
3754 }
3755
3756 double time_remaining_ms = g1_policy()->finalize_young_cset_part(target_pause_time_ms);
3757 g1_policy()->finalize_old_cset_part(time_remaining_ms);
3758
3759 evacuation_info.set_collectionset_regions(g1_policy()->cset_region_length());
3843
3844 if (collector_state()->during_initial_mark_pause()) {
3845 // We have to do this before we notify the CM threads that
3846 // they can start working to make sure that all the
3847 // appropriate initialization is done on the CM object.
3848 concurrent_mark()->checkpointRootsInitialPost();
3849 collector_state()->set_mark_in_progress(true);
3850 // Note that we don't actually trigger the CM thread at
3851 // this point. We do that later when we're sure that
3852 // the current thread has completed its logging output.
3853 }
3854
3855 allocate_dummy_regions();
3856
3857 _allocator->init_mutator_alloc_region();
3858
3859 {
3860 size_t expand_bytes = g1_policy()->expansion_amount();
3861 if (expand_bytes > 0) {
3862 size_t bytes_before = capacity();
3863 // No need for ergo logging here,
3864 // expansion_amount() does this when it returns a value > 0.
3865 double expand_ms;
3866 if (!expand(expand_bytes, &expand_ms)) {
3867 // We failed to expand the heap. Cannot do anything about it.
3868 }
3869 g1_policy()->phase_times()->record_expand_heap_time(expand_ms);
3870 }
3871 }
3872
3873 // We redo the verification but now wrt to the new CSet which
3874 // has just got initialized after the previous CSet was freed.
3875 _cm->verify_no_cset_oops();
3876 _cm->note_end_of_gc();
3877
3878 // This timing is only used by the ergonomics to handle our pause target.
3879 // It is unclear why this should not include the full pause. We will
3880 // investigate this in CR 7178365.
3881 double sample_end_time_sec = os::elapsedTime();
3882 double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
3883 size_t total_cards_scanned = per_thread_states.total_cards_scanned();
3903 // stamp here we invalidate all the GC time stamps on all the
3904 // regions and saved_mark_word() will simply return top() for
3905 // all the regions. This is a nicer way of ensuring this rather
3906 // than iterating over the regions and fixing them. In fact, the
3907 // GC time stamp increment here also ensures that
3908 // saved_mark_word() will return top() between pauses, i.e.,
3909 // during concurrent refinement. So we don't need the
3910 // is_gc_active() check to decide which top to use when
3911 // scanning cards (see CR 7039627).
3912 increment_gc_time_stamp();
3913
3914 verify_after_gc();
3915 check_bitmaps("GC End");
3916
3917 assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
3918 ref_processor_stw()->verify_no_references_recorded();
3919
3920 // CM reference discovery will be re-enabled if necessary.
3921 }
3922
3923 #ifdef TRACESPINNING
3924 ParallelTaskTerminator::print_termination_counts();
3925 #endif
3926
3927 gc_epilogue(false);
3928 }
3929
3930 // Print the remainder of the GC log output.
3931 log_gc_footer(os::elapsed_counter() - pause_start_counter);
3932
3933 // It is not yet safe to tell the concurrent mark to
3934 // start as we have some optional output below. We don't want the
3935 // output from the concurrent mark thread interfering with this
3936 // logging output either.
3937
3938 _hrm.verify_optional();
3939 verify_region_sets_optional();
3940
3941 TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
3942 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
3943
3944 print_heap_after_gc();
3945 trace_heap_after_gc(_gc_tracer_stw);
3946
3947 // We must call G1MonitoringSupport::update_sizes() in the same scoping level
3948 // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
3949 // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
3950 // before any GC notifications are raised.
3951 g1mm()->update_sizes();
3952
3953 _gc_tracer_stw->report_evacuation_info(&evacuation_info);
3954 _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
3955 _gc_timer_stw->register_gc_end();
3956 _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
3957 }
3958 // It should now be safe to tell the concurrent mark thread to start
3959 // without its logging output interfering with the logging output
3960 // that came from the pause.
3961
4112
4113 double strong_roots_sec = os::elapsedTime() - start_strong_roots_sec;
4114
4115 double term_sec = 0.0;
4116 size_t evac_term_attempts = 0;
4117 {
4118 double start = os::elapsedTime();
4119 G1ParEvacuateFollowersClosure evac(_g1h, pss, _queues, &_terminator);
4120 evac.do_void();
4121
4122 evac_term_attempts = evac.term_attempts();
4123 term_sec = evac.term_time();
4124 double elapsed_sec = os::elapsedTime() - start;
4125 _g1h->g1_policy()->phase_times()->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
4126 _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
4127 _g1h->g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, evac_term_attempts);
4128 }
4129
4130 assert(pss->queue_is_empty(), "should be empty");
4131
4132 if (log_is_enabled(Debug, gc, task, stats)) {
4133 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
4134 size_t lab_waste;
4135 size_t lab_undo_waste;
4136 pss->waste(lab_waste, lab_undo_waste);
4137 _g1h->print_termination_stats(worker_id,
4138 (os::elapsedTime() - start_sec) * 1000.0, /* elapsed time */
4139 strong_roots_sec * 1000.0, /* strong roots time */
4140 term_sec * 1000.0, /* evac term time */
4141 evac_term_attempts, /* evac term attempts */
4142 lab_waste, /* alloc buffer waste */
4143 lab_undo_waste /* undo waste */
4144 );
4145 }
4146
4147 // Close the inner scope so that the ResourceMark and HandleMark
4148 // destructors are executed here and are included as part of the
4149 // "GC Worker Time".
4150 }
4151 _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime());
4152 }
4153 };
4154
4155 void G1CollectedHeap::print_termination_stats_hdr() {
4156 LogHandle(gc, task, stats) log;
4157 if (!log.is_debug()) {
4158 return;
4159 }
4160 log.debug("GC Termination Stats");
4161 log.debug(" elapsed --strong roots-- -------termination------- ------waste (KiB)------");
4162 log.debug("thr ms ms %% ms %% attempts total alloc undo");
4163 log.debug("--- --------- --------- ------ --------- ------ -------- ------- ------- -------");
4164 }
4165
4166 void G1CollectedHeap::print_termination_stats(uint worker_id,
4167 double elapsed_ms,
4168 double strong_roots_ms,
4169 double term_ms,
4170 size_t term_attempts,
4171 size_t alloc_buffer_waste,
4172 size_t undo_waste) const {
4173 log_debug(gc, task, stats)
4174 ("%3d %9.2f %9.2f %6.2f "
4175 "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
4176 SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
4177 worker_id, elapsed_ms, strong_roots_ms, strong_roots_ms * 100 / elapsed_ms,
4178 term_ms, term_ms * 100 / elapsed_ms, term_attempts,
4179 (alloc_buffer_waste + undo_waste) * HeapWordSize / K,
4180 alloc_buffer_waste * HeapWordSize / K,
4181 undo_waste * HeapWordSize / K);
4182 }
4183
4184 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
4185 private:
4186 BoolObjectClosure* _is_alive;
4187 int _initial_string_table_size;
4188 int _initial_symbol_table_size;
4189
4190 bool _process_strings;
4191 int _strings_processed;
4192 int _strings_removed;
4193
4194 bool _process_symbols;
4203 _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
4204
4205 _initial_string_table_size = StringTable::the_table()->table_size();
4206 _initial_symbol_table_size = SymbolTable::the_table()->table_size();
4207 if (process_strings) {
4208 StringTable::clear_parallel_claimed_index();
4209 }
4210 if (process_symbols) {
4211 SymbolTable::clear_parallel_claimed_index();
4212 }
4213 }
4214
4215 ~G1StringSymbolTableUnlinkTask() {
4216 guarantee(!_process_strings || StringTable::parallel_claimed_index() >= _initial_string_table_size,
4217 "claim value %d after unlink less than initial string table size %d",
4218 StringTable::parallel_claimed_index(), _initial_string_table_size);
4219 guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
4220 "claim value %d after unlink less than initial symbol table size %d",
4221 SymbolTable::parallel_claimed_index(), _initial_symbol_table_size);
4222
4223 log_trace(gc, stringdedup)("Cleaned string and symbol table, "
4224 "strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, "
4225 "symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",
4226 strings_processed(), strings_removed(),
4227 symbols_processed(), symbols_removed());
4228 }
4229
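// Each worker claims chunks of the string and/or symbol table in parallel,
// unlinks entries that are no longer reachable, and adds its per-worker
// counts to the shared totals atomically.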
4230 void work(uint worker_id) {
4231 int strings_processed = 0;
4232 int strings_removed = 0;
4233 int symbols_processed = 0;
4234 int symbols_removed = 0;
4235 if (_process_strings) {
4236 StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
4237 Atomic::add(strings_processed, &_strings_processed);
4238 Atomic::add(strings_removed, &_strings_removed);
4239 }
4240 if (_process_symbols) {
4241 SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
4242 Atomic::add(symbols_processed, &_symbols_processed);
4243 Atomic::add(symbols_removed, &_symbols_removed);
4244 }
4245 }
4246
4247 size_t strings_processed() const { return (size_t)_strings_processed; }
4248 size_t strings_removed() const { return (size_t)_strings_removed; }
5035
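// Evacuate the collection set: G1ParTask performs root processing and object
// copying on all active workers. The time spent in the G1RootProcessor
// destructor after the workers finish is reported separately as code root
// fixup time.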
5036 void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
5037 g1_rem_set()->prepare_for_oops_into_collection_set_do();
5038
5039 // Should G1EvacuationFailureALot be in effect for this GC?
5040 NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
5041
5042 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5043 double start_par_time_sec = os::elapsedTime();
5044 double end_par_time_sec;
5045
5046 {
5047 const uint n_workers = workers()->active_workers();
5048 G1RootProcessor root_processor(this, n_workers);
5049 G1ParTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, n_workers);
5050 // InitialMark needs claim bits to keep track of the marked-through CLDs.
5051 if (collector_state()->during_initial_mark_pause()) {
5052 ClassLoaderDataGraph::clear_claimed_marks();
5053 }
5054
5055 print_termination_stats_hdr();
5056
5057 workers()->run_task(&g1_par_task);
5058 end_par_time_sec = os::elapsedTime();
5059
5060 // Closing the inner scope will execute the destructor
5061 // for the G1RootProcessor object. We record the current
5062 // elapsed time before closing the scope so that time
5063 // taken for the destructor is NOT included in the
5064 // reported parallel time.
5065 }
5066
5067 G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
5068
5069 double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
5070 phase_times->record_par_time(par_time_ms);
5071
5072 double code_root_fixup_time_ms =
5073 (os::elapsedTime() - end_par_time_sec) * 1000.0;
5074 phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
5075
5263 }
5264 }
5265
5266 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
5267 G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
5268 for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
5269 verify_dirty_region(hr);
5270 }
5271 }
5272
5273 void G1CollectedHeap::verify_dirty_young_regions() {
5274 verify_dirty_young_list(_young_list->first_region());
5275 }
5276
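// Verify that the given marking bitmap has no bit set in [tams, end):
// objects allocated above the top-at-mark-start are implicitly live and
// must never be marked on the bitmap.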
5277 bool G1CollectedHeap::verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
5278 HeapWord* tams, HeapWord* end) {
5279 guarantee(tams <= end,
5280 "tams: " PTR_FORMAT " end: " PTR_FORMAT, p2i(tams), p2i(end));
5281 HeapWord* result = bitmap->getNextMarkedWordAddress(tams, end);
5282 if (result < end) {
5283 log_info(gc, verify)("## wrong marked address on %s bitmap: " PTR_FORMAT, bitmap_name, p2i(result));
5284 log_info(gc, verify)("## %s tams: " PTR_FORMAT " end: " PTR_FORMAT, bitmap_name, p2i(tams), p2i(end));
5285 return false;
5286 }
5287 return true;
5288 }
5289
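// Check both marking bitmaps for a single region: the prev bitmap against
// prev TAMS, and the next bitmap against next TAMS unless the next bitmap
// is currently being cleared concurrently.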
5290 bool G1CollectedHeap::verify_bitmaps(const char* caller, HeapRegion* hr) {
5291 CMBitMapRO* prev_bitmap = concurrent_mark()->prevMarkBitMap();
5292 CMBitMapRO* next_bitmap = (CMBitMapRO*) concurrent_mark()->nextMarkBitMap();
5293
5294 HeapWord* bottom = hr->bottom();
5295 HeapWord* ptams = hr->prev_top_at_mark_start();
5296 HeapWord* ntams = hr->next_top_at_mark_start();
5297 HeapWord* end = hr->end();
5298
5299 bool res_p = verify_no_bits_over_tams("prev", prev_bitmap, ptams, end);
5300
5301 bool res_n = true;
5302 // We reset mark_in_progress() before we reset _cmThread->in_progress() and in this window
5303 // we do the clearing of the next bitmap concurrently. Thus, we can not verify the bitmap
5304 // if we happen to be in that state.
5305 if (collector_state()->mark_in_progress() || !_cmThread->in_progress()) {
5306 res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
5307 }
5308 if (!res_p || !res_n) {
5309 log_info(gc, verify)("#### Bitmap verification failed for " HR_FORMAT, HR_FORMAT_PARAMS(hr));
5310 log_info(gc, verify)("#### Caller: %s", caller);
5311 return false;
5312 }
5313 return true;
5314 }
5315
5316 void G1CollectedHeap::check_bitmaps(const char* caller, HeapRegion* hr) {
5317 if (!G1VerifyBitmaps) return;
5318
5319 guarantee(verify_bitmaps(caller, hr), "bitmap verification");
5320 }
5321
5322 class G1VerifyBitmapClosure : public HeapRegionClosure {
5323 private:
5324 const char* _caller;
5325 G1CollectedHeap* _g1h;
5326 bool _failures;
5327
5328 public:
5329 G1VerifyBitmapClosure(const char* caller, G1CollectedHeap* g1h) :
5330 _caller(caller), _g1h(g1h), _failures(false) { }
5342
5343 void G1CollectedHeap::check_bitmaps(const char* caller) {
5344 if (!G1VerifyBitmaps) return;
5345
5346 G1VerifyBitmapClosure cl(caller, this);
5347 heap_region_iterate(&cl);
5348 guarantee(!cl.failures(), "bitmap verification");
5349 }
5350
5351 class G1CheckCSetFastTableClosure : public HeapRegionClosure {
5352 private:
5353 bool _failures;
5354 public:
5355 G1CheckCSetFastTableClosure() : HeapRegionClosure(), _failures(false) { }
5356
5357 virtual bool doHeapRegion(HeapRegion* hr) {
5358 uint i = hr->hrm_index();
5359 InCSetState cset_state = (InCSetState) G1CollectedHeap::heap()->_in_cset_fast_test.get_by_index(i);
5360 if (hr->is_humongous()) {
5361 if (hr->in_collection_set()) {
5362 log_info(gc, verify)("\n## humongous region %u in CSet", i);
5363 _failures = true;
5364 return true;
5365 }
5366 if (cset_state.is_in_cset()) {
5367 log_info(gc, verify)("\n## inconsistent cset state %d for humongous region %u", cset_state.value(), i);
5368 _failures = true;
5369 return true;
5370 }
5371 if (hr->is_continues_humongous() && cset_state.is_humongous()) {
5372 log_info(gc, verify)("\n## inconsistent cset state %d for continues humongous region %u", cset_state.value(), i);
5373 _failures = true;
5374 return true;
5375 }
5376 } else {
5377 if (cset_state.is_humongous()) {
5378 log_info(gc, verify)("\n## inconsistent cset state %d for non-humongous region %u", cset_state.value(), i);
5379 _failures = true;
5380 return true;
5381 }
5382 if (hr->in_collection_set() != cset_state.is_in_cset()) {
5383 log_info(gc, verify)("\n## in CSet %d / cset state %d inconsistency for region %u",
5384 hr->in_collection_set(), cset_state.value(), i);
5385 _failures = true;
5386 return true;
5387 }
5388 if (cset_state.is_in_cset()) {
5389 if (hr->is_young() != (cset_state.is_young())) {
5390 log_info(gc, verify)("\n## is_young %d / cset state %d inconsistency for region %u",
5391 hr->is_young(), cset_state.value(), i);
5392 _failures = true;
5393 return true;
5394 }
5395 if (hr->is_old() != (cset_state.is_old())) {
5396 log_info(gc, verify)("\n## is_old %d / cset state %d inconsistency for region %u",
5397 hr->is_old(), cset_state.value(), i);
5398 _failures = true;
5399 return true;
5400 }
5401 }
5402 }
5403 return false;
5404 }
5405
5406 bool failures() const { return _failures; }
5407 };
5408
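// Cross-check the in-cset fast test table against the actual state of every
// region in the heap.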
5409 bool G1CollectedHeap::check_cset_fast_test() {
5410 G1CheckCSetFastTableClosure cl;
5411 _hrm.iterate(&cl);
5412 return !cl.failures();
5413 }
5414 #endif // PRODUCT
5415
5416 void G1CollectedHeap::cleanUpCardTable() {
5586 // (i.e. it has "escaped" to an old object) this remembered set entry will stay
5587 // until the end of a concurrent mark.
5588 //
5589 // It is not required to check whether the object has been found dead by marking
5590 // or not, in fact it would prevent reclamation within a concurrent cycle, as
5591 // all objects allocated during that time are considered live.
5592 // SATB marking is even more conservative than the remembered set.
5593 // So if at this point in the collection there is no remembered set entry,
5594 // nobody has a reference to it.
5595 // At the start of collection we flush all refinement logs, and remembered sets
5596 // are completely up-to-date with respect to references to the humongous object.
5597 //
5598 // Other implementation considerations:
5599 // - never consider object arrays at this time because they would pose
5600 // considerable effort for cleaning up the remembered sets. This is
5601 // required because stale remembered sets might reference locations that
5602 // are currently allocated into.
5603 uint region_idx = r->hrm_index();
5604 if (!g1h->is_humongous_reclaim_candidate(region_idx) ||
5605 !r->rem_set()->is_empty()) {
5606 log_debug(gc, humongous)("Live humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
5607 region_idx,
5608 (size_t)obj->size() * HeapWordSize,
5609 p2i(r->bottom()),
5610 r->rem_set()->occupied(),
5611 r->rem_set()->strong_code_roots_list_length(),
5612 next_bitmap->isMarked(r->bottom()),
5613 g1h->is_humongous_reclaim_candidate(region_idx),
5614 obj->is_typeArray()
5615 );
5616 return false;
5617 }
5618
5619 guarantee(obj->is_typeArray(),
5620 "Only eagerly reclaiming type arrays is supported, but the object "
5621 PTR_FORMAT " is not.", p2i(r->bottom()));
5622
5623 log_debug(gc, humongous)("Dead humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
5624 region_idx,
5625 (size_t)obj->size() * HeapWordSize,
5626 p2i(r->bottom()),
5627 r->rem_set()->occupied(),
5628 r->rem_set()->strong_code_roots_list_length(),
5629 next_bitmap->isMarked(r->bottom()),
5630 g1h->is_humongous_reclaim_candidate(region_idx),
5631 obj->is_typeArray()
5632 );
5633
5634 // Need to clear mark bit of the humongous object if already set.
5635 if (next_bitmap->isMarked(r->bottom())) {
5636 next_bitmap->clear(r->bottom());
5637 }
5638 do {
5639 HeapRegion* next = g1h->next_region_in_humongous(r);
5640 _freed_bytes += r->used();
5641 r->set_containing_set(NULL);
5642 _humongous_regions_removed.increment(1u, r->capacity());
5643 g1h->free_humongous_region(r, _free_region_list, false);
5644 r = next;
5645 } while (r != NULL);
5646
5647 return false;
5648 }
5649
5650 HeapRegionSetCount& humongous_free_count() {
5651 return _humongous_regions_removed;
5652 }
5653
5654 size_t bytes_freed() const {
5655 return _freed_bytes;
5656 }
5657
5658 size_t humongous_reclaimed() const {
5659 return _humongous_regions_removed.length();
5660 }
5661 };
5662
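// Free humongous regions that were identified as reclaim candidates at the
// start of this pause and whose remembered sets are still empty, without
// waiting for a concurrent marking cycle.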
5663 void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
5664 assert_at_safepoint(true);
5665
5666 if (!G1EagerReclaimHumongousObjects ||
5667 (!_has_humongous_reclaim_candidates && !log_is_enabled(Debug, gc, humongous))) {
5668 g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);
5669 return;
5670 }
5671
5672 double start_time = os::elapsedTime();
5673
5674 FreeRegionList local_cleanup_list("Local Humongous Cleanup List");
5675
5676 G1FreeHumongousRegionClosure cl(&local_cleanup_list);
5677 heap_region_iterate(&cl);
5678
5679 HeapRegionSetCount empty_set;
5680 remove_from_old_sets(empty_set, cl.humongous_free_count());
5681
5682 G1HRPrinter* hrp = hr_printer();
5683 if (hrp->is_active()) {
5684 FreeRegionListIterator iter(&local_cleanup_list);
5685 while (iter.more_available()) {
5686 HeapRegion* hr = iter.get_next();
5687 hrp->cleanup(hr);
5700 // the current incremental collection set in preparation for a
5701 // full collection. After the full GC we will start to build up
5702 // the incremental collection set again.
5703 // This is only called when we're doing a full collection
5704 // and is immediately followed by the tearing down of the young list.
5705
5706 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
5707 HeapRegion* cur = cs_head;
5708
5709 while (cur != NULL) {
5710 HeapRegion* next = cur->next_in_collection_set();
5711 assert(cur->in_collection_set(), "bad CS");
5712 cur->set_next_in_collection_set(NULL);
5713 clear_in_cset(cur);
5714 cur->set_young_index_in_cset(-1);
5715 cur = next;
5716 }
5717 }
5718
5719 void G1CollectedHeap::set_free_regions_coming() {
5720 log_develop(gc, freelist)("G1ConcRegionFreeing [cm thread] : "
5721 "setting free regions coming");
5722
5723 assert(!free_regions_coming(), "pre-condition");
5724 _free_regions_coming = true;
5725 }
5726
5727 void G1CollectedHeap::reset_free_regions_coming() {
5728 assert(free_regions_coming(), "pre-condition");
5729
5730 {
5731 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
5732 _free_regions_coming = false;
5733 SecondaryFreeList_lock->notify_all();
5734 }
5735
5736 log_develop(gc, freelist)("G1ConcRegionFreeing [cm thread] : "
5737 "reset free regions coming");
5738 }
5739
5740 void G1CollectedHeap::wait_while_free_regions_coming() {
5741 // Most of the time we won't have to wait, so let's do a quick test
5742 // first before we take the lock.
5743 if (!free_regions_coming()) {
5744 return;
5745 }
5746
5747 log_develop(gc, freelist)("G1ConcRegionFreeing [other] : "
5748 "waiting for free regions");
5749
5750 {
5751 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
5752 while (free_regions_coming()) {
5753 SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
5754 }
5755 }
5756
5757 log_develop(gc, freelist)("G1ConcRegionFreeing [other] : "
5758 "done waiting for free regions");
5759 }
5760
5761 bool G1CollectedHeap::is_old_gc_alloc_region(HeapRegion* hr) {
5762 return _allocator->is_retained_old_region(hr);
5763 }
5764
5765 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
5766 _young_list->push_region(hr);
5767 }
5768
5769 class NoYoungRegionsClosure: public HeapRegionClosure {
5770 private:
5771 bool _success;
5772 public:
5773 NoYoungRegionsClosure() : _success(true) { }
5774 bool doHeapRegion(HeapRegion* r) {
5775 if (r->is_young()) {
5776 log_info(gc, verify)("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
5777 p2i(r->bottom()), p2i(r->end()));
5778 _success = false;
5779 }
5780 return false;
5781 }
5782 bool success() { return _success; }
5783 };
5784
5785 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
5786 bool ret = _young_list->check_list_empty(check_sample);
5787
5788 if (check_heap) {
5789 NoYoungRegionsClosure closure;
5790 heap_region_iterate(&closure);
5791 ret = ret && closure.success();
5792 }
5793
5794 return ret;
5795 }
5796
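// Retire a GC allocation region: record the bytes copied into it for the
// policy, then move the region to the survivor young list or the old set,
// depending on its destination state.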
6007 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
6008 size_t allocated_bytes,
6009 InCSetState dest) {
6010 bool during_im = collector_state()->during_initial_mark_pause();
6011 alloc_region->note_end_of_copying(during_im);
6012 g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
6013 if (dest.is_young()) {
6014 young_list()->add_survivor_region(alloc_region);
6015 } else {
6016 _old_set.add(alloc_region);
6017 }
6018 _hr_printer.retire(alloc_region);
6019 }
6020
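// Allocate the free region with the highest index, expanding the heap at the
// top end if that was required to make such a region available.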
6021 HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
6022 bool expanded = false;
6023 uint index = _hrm.find_highest_free(&expanded);
6024
6025 if (index != G1_NO_HRM_INDEX) {
6026 if (expanded) {
6027 log_debug(gc, ergo, heap)("Attempt heap expansion (requested address range outside heap bounds). region size: " SIZE_FORMAT "B",
6028 HeapRegion::GrainWords * HeapWordSize);
6029 }
6030 _hrm.allocate_free_regions_starting_at(index, 1);
6031 return region_at(index);
6032 }
6033 return NULL;
6034 }
6035
6036 // Heap region set verification
6037
6038 class VerifyRegionListsClosure : public HeapRegionClosure {
6039 private:
6040 HeapRegionSet* _old_set;
6041 HeapRegionSet* _humongous_set;
6042 HeapRegionManager* _hrm;
6043
6044 public:
6045 HeapRegionSetCount _old_count;
6046 HeapRegionSetCount _humongous_count;
6047 HeapRegionSetCount _free_count;
|