242 // objects, for improved usage accounting.
243 // How many words we use for filler objects.
244 size_t word_fill_size = word_size_sum - word_size;
245
246 // How many words of memory we "waste" because they cannot hold a filler object.
247 size_t words_not_fillable = 0;
248
249 if (word_fill_size >= min_fill_size()) {
250 fill_with_objects(obj_top, word_fill_size);
251 } else if (word_fill_size > 0) {
252 // We have space to fill, but we cannot fit an object there.
253 words_not_fillable = word_fill_size;
254 word_fill_size = 0;
255 }
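// Illustrative arithmetic (hypothetical numbers): with regions of 1M words and a
// humongous request of 2.5M words, word_size_sum would be 3M words (three full
// regions), leaving word_fill_size = 0.5M words; since that exceeds
// min_fill_size(), the tail of the last region is filled with dummy objects and
// stays parsable.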
256
257 // We will set up the first region as "starts humongous". This
258 // will also update the BOT covering all the regions to reflect
259 // that there is a single object that starts at the bottom of the
260 // first region.
261 first_hr->set_starts_humongous(obj_top, word_fill_size);
262 _g1_policy->remset_tracker()->update_at_allocate(first_hr);
263 // Then, if there are any, we will set up the "continues
264 // humongous" regions.
265 HeapRegion* hr = NULL;
266 for (uint i = first + 1; i <= last; ++i) {
267 hr = region_at(i);
268 hr->set_continues_humongous(first_hr);
269 _g1_policy->remset_tracker()->update_at_allocate(hr);
270 }
271
272 // Up to this point no concurrent thread would have been able to
273 // do any scanning on any region in this series. All the top
274 // fields still point to bottom, so the intersection between
275 // [bottom,top] and [card_start,card_end] will be empty. Before we
276 // update the top fields, we'll do a storestore to make sure that
277 // no thread sees the update to top before the zeroing of the
278 // object header and the BOT initialization.
279 OrderAccess::storestore();
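// (Note: readers are expected to use a matching load ordering when they observe
// the updated top values below, so that they also observe the object header and
// BOT initialization performed before this barrier.)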
280
281 // Now, we will update the top fields of all the regions in this series
282 // except the last one.
283 for (uint i = first; i < last; ++i) {
284 hr = region_at(i);
285 hr->set_top(hr->end());
286 }
287
288 hr = region_at(last);
289 // If we cannot fit a filler object, we must set top to the end
339 // Policy: Try only empty (i.e. already committed) regions first. Maybe we
340 // are lucky enough to find some.
341 first = _hrm->find_contiguous_only_empty(obj_regions);
342 if (first != G1_NO_HRM_INDEX) {
343 _hrm->allocate_free_regions_starting_at(first, obj_regions);
344 }
345 }
346
347 if (first == G1_NO_HRM_INDEX) {
348 // Policy: We could not find enough regions for the humongous object in the
349 // free list. Look through the heap for a mix of free and uncommitted regions.
350 // If we find such a mix, try expanding the heap.
351 first = _hrm->find_contiguous_empty_or_unavailable(obj_regions);
352 if (first != G1_NO_HRM_INDEX) {
353 // We found something. Make sure these regions are committed, i.e. expand
354 // the heap. Alternatively we could do a defragmentation GC.
355 log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B",
356 word_size * HeapWordSize);
357
358 _hrm->expand_at(first, obj_regions, workers());
359 g1_policy()->record_new_heap_size(num_regions());
360
361 #ifdef ASSERT
362 for (uint i = first; i < first + obj_regions; ++i) {
363 HeapRegion* hr = region_at(i);
364 assert(hr->is_free(), "sanity");
365 assert(hr->is_empty(), "sanity");
366 assert(is_on_master_free_list(hr), "sanity");
367 }
368 #endif
369 _hrm->allocate_free_regions_starting_at(first, obj_regions);
370 } else {
371 // Policy: Potentially trigger a defragmentation GC.
372 }
373 }
374
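// At this point 'first' is either the index of the first region of a contiguous,
// committed run of obj_regions free regions, or still G1_NO_HRM_INDEX if neither
// policy above succeeded; in the latter case the caller is expected to fall back
// to a (possibly defragmenting) GC.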
375 HeapWord* result = NULL;
376 if (first != G1_NO_HRM_INDEX) {
377 result = humongous_obj_allocate_initialize_regions(first, obj_regions, word_size);
378 assert(result != NULL, "it should always return a valid result");
379
423
424 // We will loop until a) we manage to successfully perform the
425 // allocation or b) we successfully schedule a collection which
426 // fails to perform the allocation. b) is the only case when we'll
427 // return NULL.
428 HeapWord* result = NULL;
429 for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
430 bool should_try_gc;
431 uint gc_count_before;
432
433 {
434 MutexLockerEx x(Heap_lock);
435 result = _allocator->attempt_allocation_locked(word_size);
436 if (result != NULL) {
437 return result;
438 }
439
440 // If the GCLocker is active and we are bound for a GC, try expanding the young gen.
441 // This is different from the case where only GCLocker::needs_gc() is set: here we
442 // try to avoid a potentially long wait for the GCLocker-initiated GC.
443 if (GCLocker::is_active_and_needs_gc() && g1_policy()->can_expand_young_list()) {
444 // No need for an ergo message here, can_expand_young_list() does this when
445 // it returns true.
446 result = _allocator->attempt_allocation_force(word_size);
447 if (result != NULL) {
448 return result;
449 }
450 }
451 // Only try a GC if the GCLocker does not signal the need for a GC. Wait until
452 // the GCLocker-initiated GC has been performed and then retry. This also covers
453 // the case where the GCLocker is no longer active but its GC is still pending.
454 should_try_gc = !GCLocker::needs_gc();
455 // Read the GC count while still holding the Heap_lock.
456 gc_count_before = total_collections();
457 }
458
459 if (should_try_gc) {
460 bool succeeded;
461 result = do_collection_pause(word_size, gc_count_before, &succeeded,
462 GCCause::_g1_inc_collection_pause);
463 if (result != NULL) {
844 // The structure of this method has a lot of similarities to
845 // attempt_allocation_slow(). The reason these two were not merged
846 // into a single one is that such a method would require several "if
847 // allocation is not humongous do this, otherwise do that"
848 // conditional paths which would obscure its flow. In fact, an early
849 // version of this code did use a unified method which was harder to
850 // follow and, as a result, it had subtle bugs that were hard to
851 // track down. So keeping these two methods separate allows each to
852 // be more readable. It will be good to keep these two in sync as
853 // much as possible.
854
855 assert_heap_not_locked_and_not_at_safepoint();
856 assert(is_humongous(word_size), "attempt_allocation_humongous() "
857 "should only be called for humongous allocations");
858
859 // Humongous objects can exhaust the heap quickly, so we should check whether we
860 // need to start a marking cycle at each humongous object allocation. We do
861 // the check before the actual allocation, rather than after, so that we
862 // avoid having to keep track of the newly allocated memory while we
863 // do a GC.
864 if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation",
865 word_size)) {
866 collect(GCCause::_g1_humongous_allocation);
867 }
868
869 // We will loop until a) we manage to successfully perform the
870 // allocation or b) we successfully schedule a collection which
871 // fails to perform the allocation. b) is the only case when we'll
872 // return NULL.
873 HeapWord* result = NULL;
874 for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
875 bool should_try_gc;
876 uint gc_count_before;
877
878
879 {
880 MutexLockerEx x(Heap_lock);
881
882 // Given that humongous objects are not allocated in young
883 // regions, we'll first try to do the allocation without doing a
884 // collection hoping that there's enough space in the heap.
885 result = humongous_obj_allocate(word_size);
886 if (result != NULL) {
887 size_t size_in_regions = humongous_obj_size_in_regions(word_size);
888 g1_policy()->add_bytes_allocated_in_old_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
889 return result;
890 }
891
892 // Only try a GC if the GCLocker does not signal the need for a GC. Wait until
893 // the GCLocker-initiated GC has been performed and then retry. This also covers
894 // the case where the GCLocker is no longer active but its GC is still pending.
895 should_try_gc = !GCLocker::needs_gc();
896 // Read the GC count while still holding the Heap_lock.
897 gc_count_before = total_collections();
898 }
899
900 if (should_try_gc) {
901 bool succeeded;
902 result = do_collection_pause(word_size, gc_count_before, &succeeded,
903 GCCause::_g1_humongous_allocation);
904 if (result != NULL) {
905 assert(succeeded, "only way to get back a non-NULL result");
906 log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
907 Thread::current()->name(), p2i(result));
908 return result;
946 (try_count % QueuedAllocationWarningCount == 0)) {
947 log_warning(gc, alloc)("%s: Retried allocation %u times for " SIZE_FORMAT " words",
948 Thread::current()->name(), try_count, word_size);
949 }
950 }
951
952 ShouldNotReachHere();
953 return NULL;
954 }
955
956 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
957 bool expect_null_mutator_alloc_region) {
958 assert_at_safepoint_on_vm_thread();
959 assert(!_allocator->has_mutator_alloc_region() || !expect_null_mutator_alloc_region,
960 "the current alloc region was unexpectedly found to be non-NULL");
961
962 if (!is_humongous(word_size)) {
963 return _allocator->attempt_allocation_locked(word_size);
964 } else {
965 HeapWord* result = humongous_obj_allocate(word_size);
966 if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
967 collector_state()->set_initiate_conc_mark_if_possible(true);
968 }
969 return result;
970 }
971
972 ShouldNotReachHere();
973 }
974
975 class PostCompactionPrinterClosure: public HeapRegionClosure {
976 private:
977 G1HRPrinter* _hr_printer;
978 public:
979 bool do_heap_region(HeapRegion* hr) {
980 assert(!hr->is_young(), "not expecting to find young regions");
981 _hr_printer->post_compaction(hr);
982 return false;
983 }
984
985 PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
986 : _hr_printer(hr_printer) { }
1346 log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: " SIZE_FORMAT "B expansion amount: " SIZE_FORMAT "B",
1347 expand_bytes, aligned_expand_bytes);
1348
1349 if (is_maximal_no_gc()) {
1350 log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
1351 return false;
1352 }
1353
1354 double expand_heap_start_time_sec = os::elapsedTime();
1355 uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
1356 assert(regions_to_expand > 0, "Must expand by at least one region");
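// For example (hypothetical numbers): with HeapRegion::GrainBytes = 8M and
// aligned_expand_bytes = 64M, regions_to_expand is 8; expand_by() below may
// still commit fewer regions than requested.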
1357
1358 uint expanded_by = _hrm->expand_by(regions_to_expand, pretouch_workers);
1359 if (expand_time_ms != NULL) {
1360 *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
1361 }
1362
1363 if (expanded_by > 0) {
1364 size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
1365 assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1366 g1_policy()->record_new_heap_size(num_regions());
1367 } else {
1368 log_debug(gc, ergo, heap)("Did not expand the heap (heap expansion operation failed)");
1369
1370 // The expansion of the virtual storage space was unsuccessful.
1371 // Let's see if it was because we ran out of swap.
1372 if (G1ExitOnExpansionFailure &&
1373 _hrm->available() >= regions_to_expand) {
1374 // We had head room...
1375 vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
1376 }
1377 }
1378 return regions_to_expand > 0;
1379 }
1380
1381 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1382 size_t aligned_shrink_bytes =
1383 ReservedSpace::page_align_size_down(shrink_bytes);
1384 aligned_shrink_bytes = align_down(aligned_shrink_bytes,
1385 HeapRegion::GrainBytes);
1386 uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
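// For example (hypothetical numbers): a request to shrink by 100M with 8M
// regions gives num_regions_to_remove = 12; shrink_by() may remove fewer
// regions than requested.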
1387
1388 uint num_regions_removed = _hrm->shrink_by(num_regions_to_remove);
1389 size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
1390
1391
1392 log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",
1393 shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
1394 if (num_regions_removed > 0) {
1395 g1_policy()->record_new_heap_size(num_regions());
1396 } else {
1397 log_debug(gc, ergo, heap)("Did not expand the heap (heap shrinking operation failed)");
1398 }
1399 }
1400
1401 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1402 _verifier->verify_region_sets_optional();
1403
1404 // We should only reach here at the end of a Full GC or during Remark, which
1405 // means we should not be holding on to any GC alloc regions. The method
1406 // below will make sure of that and do any remaining cleanup.
1407 _allocator->abandon_gc_alloc_regions();
1408
1409 // Instead of tearing down / rebuilding the free lists here, we
1410 // could use the remove_all_pending() method on free_list to
1411 // remove only the ones that we need to remove.
1412 tear_down_region_sets(true /* free_list_only */);
1413 shrink_helper(shrink_bytes);
1414 rebuild_region_sets(true /* free_list_only */);
1415
1493 _listener(),
1494 _hrm(NULL),
1495 _allocator(NULL),
1496 _verifier(NULL),
1497 _summary_bytes_used(0),
1498 _archive_allocator(NULL),
1499 _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
1500 _old_evac_stats("Old", OldPLABSize, PLABWeight),
1501 _expand_heap_after_alloc_failure(true),
1502 _g1mm(NULL),
1503 _humongous_reclaim_candidates(),
1504 _has_humongous_reclaim_candidates(false),
1505 _hr_printer(),
1506 _collector_state(),
1507 _old_marking_cycles_started(0),
1508 _old_marking_cycles_completed(0),
1509 _eden(),
1510 _survivor(),
1511 _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1512 _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1513 _g1_policy(G1Policy::create_policy(collector_policy, _gc_timer_stw)),
1514 _heap_sizing_policy(NULL),
1515 _collection_set(this, _g1_policy),
1516 _hot_card_cache(NULL),
1517 _g1_rem_set(NULL),
1518 _dirty_card_queue_set(false),
1519 _cm(NULL),
1520 _cm_thread(NULL),
1521 _cr(NULL),
1522 _task_queues(NULL),
1523 _evacuation_failed(false),
1524 _evacuation_failed_info_array(NULL),
1525 _preserved_marks_set(true /* in_c_heap */),
1526 #ifndef PRODUCT
1527 _evacuation_failure_alot_for_current_gc(false),
1528 _evacuation_failure_alot_gc_number(0),
1529 _evacuation_failure_alot_count(0),
1530 #endif
1531 _ref_processor_stw(NULL),
1532 _is_alive_closure_stw(this),
1533 _is_subject_to_discovery_stw(this),
1534 _ref_processor_cm(NULL),
1535 _is_alive_closure_cm(this),
1536 _is_subject_to_discovery_cm(this),
1537 _in_cset_fast_test() {
1538
1539 _verifier = new G1HeapVerifier(this);
1540
1541 _allocator = new G1Allocator(this);
1542
1543 _heap_sizing_policy = G1HeapSizingPolicy::create(this, _g1_policy->analytics());
1544
1545 _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
1546
1547 // Override the default _filler_array_max_size so that no humongous filler
1548 // objects are created.
1549 _filler_array_max_size = _humongous_object_threshold_in_words;
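// (humongous_threshold_for() is half the region size, so with 8M regions, for
// instance, any allocation of at least 4M is treated as humongous; limiting the
// filler array size to this threshold is what keeps filler objects
// non-humongous, as noted above.)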
1550
1551 uint n_queues = ParallelGCThreads;
1552 _task_queues = new RefToScanQueueSet(n_queues);
1553
1554 _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
1555
1556 for (uint i = 0; i < n_queues; i++) {
1557 RefToScanQueue* q = new RefToScanQueue();
1558 q->initialize();
1559 _task_queues->register_queue(i, q);
1560 ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
1561 }
1562
1563 // Initialize the G1EvacuationFailureALot counters and flags.
1620 return JNI_ENOMEM;
1621 }
1622 return JNI_OK;
1623 }
1624
1625 jint G1CollectedHeap::initialize() {
1626 os::enable_vtime();
1627
1628 // Necessary to satisfy locking discipline assertions.
1629
1630 MutexLocker x(Heap_lock);
1631
1632 // While there are no constraints in the GC code that HeapWordSize
1633 // be any particular value, there are multiple other areas in the
1634 // system which believe this to be true (e.g. oop->object_size in some
1635 // cases incorrectly returns the size in wordSize units rather than
1636 // HeapWordSize).
1637 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1638
1639 size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1640 size_t max_byte_size = g1_collector_policy()->heap_reserved_size_bytes();
1641 size_t heap_alignment = collector_policy()->heap_alignment();
1642
1643 // Ensure that the sizes are properly aligned.
1644 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1645 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1646 Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
1647
1648 // Reserve the maximum.
1649
1650 // When compressed oops are enabled, the preferred heap base
1651 // is calculated by subtracting the requested size from the
1652 // 32Gb boundary and using the result as the base address for
1653 // heap reservation. If the requested size is not aligned to
1654 // HeapRegion::GrainBytes (i.e. the alignment that is passed
1655 // into the ReservedHeapSpace constructor) then the actual
1656 // base of the reserved heap may end up differing from the
1657 // address that was requested (i.e. the preferred heap base).
1658 // If this happens then we could end up using a non-optimal
1659 // compressed oops mode.
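// For instance (illustrative numbers): a 20G reservation would prefer a base of
// 32G - 20G = 12G; if alignment adjustments move the actual base, the resulting
// heap may not allow the most efficient compressed oops encoding.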
1660
1721 create_aux_memory_mapper("Block Offset Table",
1722 G1BlockOffsetTable::compute_size(g1_rs.size() / HeapWordSize),
1723 G1BlockOffsetTable::heap_map_factor());
1724
1725 G1RegionToSpaceMapper* cardtable_storage =
1726 create_aux_memory_mapper("Card Table",
1727 G1CardTable::compute_size(g1_rs.size() / HeapWordSize),
1728 G1CardTable::heap_map_factor());
1729
1730 G1RegionToSpaceMapper* card_counts_storage =
1731 create_aux_memory_mapper("Card Counts Table",
1732 G1CardCounts::compute_size(g1_rs.size() / HeapWordSize),
1733 G1CardCounts::heap_map_factor());
1734
1735 size_t bitmap_size = G1CMBitMap::compute_size(g1_rs.size());
1736 G1RegionToSpaceMapper* prev_bitmap_storage =
1737 create_aux_memory_mapper("Prev Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
1738 G1RegionToSpaceMapper* next_bitmap_storage =
1739 create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
1740
1741 _hrm = HeapRegionManager::create_manager(this, g1_collector_policy());
1742
1743 _hrm->initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
1744 _card_table->initialize(cardtable_storage);
1745 // Do later initialization work for concurrent refinement.
1746 _hot_card_cache->initialize(card_counts_storage);
1747
1748 // 6843694 - ensure that the maximum region index can fit
1749 // in the remembered set structures.
1750 const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
1751 guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
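// (Assuming RegionIdx_t is a 16-bit type, max_region_idx works out to
// 2^15 - 1 = 32767, which bounds how many regions the remembered set
// encoding can refer to.)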
1752
1753 // The G1FromCardCache reserves the card with value 0 as "invalid", so the heap must not
1754 // start within the first card.
1755 guarantee(g1_rs.base() >= (char*)G1CardTable::card_size, "Java heap must not start within the first card.");
1756 // Also create a G1 rem set.
1757 _g1_rem_set = new G1RemSet(this, _card_table, _hot_card_cache);
1758 _g1_rem_set->initialize(max_reserved_capacity(), max_regions());
1759
1760 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
1761 guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
1762 guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
1763 "too many cards per region");
1764
1765 FreeRegionList::set_unrealistically_long_length(max_expandable_regions() + 1);
1766
1767 _bot = new G1BlockOffsetTable(reserved_region(), bot_storage);
1768
1769 {
1770 HeapWord* start = _hrm->reserved().start();
1771 HeapWord* end = _hrm->reserved().end();
1772 size_t granularity = HeapRegion::GrainBytes;
1773
1774 _in_cset_fast_test.initialize(start, end, granularity);
1775 _humongous_reclaim_candidates.initialize(start, end, granularity);
1776 }
1777
1778 _workers = new WorkGang("GC Thread", ParallelGCThreads,
1782 return JNI_ENOMEM;
1783 }
1784 _workers->initialize_workers();
1785
1786 // Create the G1ConcurrentMark data structure and thread.
1787 // (Must do this late, so that "max_regions" is defined.)
1788 _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
1789 if (_cm == NULL || !_cm->completed_initialization()) {
1790 vm_shutdown_during_initialization("Could not create/initialize G1ConcurrentMark");
1791 return JNI_ENOMEM;
1792 }
1793 _cm_thread = _cm->cm_thread();
1794
1795 // Now expand into the initial heap size.
1796 if (!expand(init_byte_size, _workers)) {
1797 vm_shutdown_during_initialization("Failed to allocate initial heap.");
1798 return JNI_ENOMEM;
1799 }
1800
1801 // Perform any initialization actions delegated to the policy.
1802 g1_policy()->init(this, &_collection_set);
1803
1804 jint ecode = initialize_concurrent_refinement();
1805 if (ecode != JNI_OK) {
1806 return ecode;
1807 }
1808
1809 ecode = initialize_young_gen_sampling_thread();
1810 if (ecode != JNI_OK) {
1811 return ecode;
1812 }
1813
1814 {
1815 G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
1816 dcqs.set_process_completed_buffers_threshold(concurrent_refine()->yellow_zone());
1817 dcqs.set_max_completed_buffers(concurrent_refine()->red_zone());
1818 }
1819
1820 // Here we allocate the dummy HeapRegion that is required by the
1821 // G1AllocRegion class.
1822 HeapRegion* dummy_region = _hrm->get_dummy_region();
1922 false, // Reference discovery is not atomic
1923 &_is_alive_closure_cm, // is alive closure
1924 true); // allow changes to number of processing threads
1925
1926 // STW ref processor
1927 _ref_processor_stw =
1928 new ReferenceProcessor(&_is_subject_to_discovery_stw,
1929 mt_processing, // mt processing
1930 ParallelGCThreads, // degree of mt processing
1931 (ParallelGCThreads > 1), // mt discovery
1932 ParallelGCThreads, // degree of mt discovery
1933 true, // Reference discovery is atomic
1934 &_is_alive_closure_stw, // is alive closure
1935 true); // allow changes to number of processing threads
1936 }
1937
1938 CollectorPolicy* G1CollectedHeap::collector_policy() const {
1939 return _collector_policy;
1940 }
1941
1942 G1CollectorPolicy* G1CollectedHeap::g1_collector_policy() const {
1943 return _collector_policy;
1944 }
1945
1946 SoftRefPolicy* G1CollectedHeap::soft_ref_policy() {
1947 return &_soft_ref_policy;
1948 }
1949
1950 size_t G1CollectedHeap::capacity() const {
1951 return _hrm->length() * HeapRegion::GrainBytes;
1952 }
1953
1954 size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
1955 return _hrm->total_free_bytes();
1956 }
1957
1958 void G1CollectedHeap::iterate_hcc_closure(G1CardTableEntryClosure* cl, uint worker_i) {
1959 _hot_card_cache->drain(cl, worker_i);
1960 }
1961
1962 void G1CollectedHeap::iterate_dirty_card_closure(G1CardTableEntryClosure* cl, uint worker_i) {
1963 G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
1964 size_t n_completed_buffers = 0;
1965 while (dcqs.apply_closure_during_gc(cl, worker_i)) {
1966 n_completed_buffers++;
1967 }
1968 assert(dcqs.completed_buffers_num() == 0, "Completed buffers exist!");
1969 g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers, G1GCPhaseTimes::UpdateRSProcessedBuffers);
1970 }
1971
1972 // Computes the sum of the storage used by the various regions.
1973 size_t G1CollectedHeap::used() const {
1974 size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
1975 if (_archive_allocator != NULL) {
1976 result += _archive_allocator->used();
1977 }
1978 return result;
1979 }
1980
1981 size_t G1CollectedHeap::used_unlocked() const {
1982 return _summary_bytes_used;
1983 }
1984
1985 class SumUsedClosure: public HeapRegionClosure {
1986 size_t _used;
1987 public:
1988 SumUsedClosure() : _used(0) {}
1989 bool do_heap_region(HeapRegion* r) {
2001
2002 bool G1CollectedHeap::is_user_requested_concurrent_full_gc(GCCause::Cause cause) {
2003 switch (cause) {
2004 case GCCause::_java_lang_system_gc: return ExplicitGCInvokesConcurrent;
2005 case GCCause::_dcmd_gc_run: return ExplicitGCInvokesConcurrent;
2006 case GCCause::_wb_conc_mark: return true;
2007 default : return false;
2008 }
2009 }
2010
2011 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
2012 switch (cause) {
2013 case GCCause::_gc_locker: return GCLockerInvokesConcurrent;
2014 case GCCause::_g1_humongous_allocation: return true;
2015 case GCCause::_g1_periodic_collection: return G1PeriodicGCInvokesConcurrent;
2016 default: return is_user_requested_concurrent_full_gc(cause);
2017 }
2018 }
2019
2020 bool G1CollectedHeap::should_upgrade_to_full_gc(GCCause::Cause cause) {
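// Upgrade to a Full GC only when the policy explicitly forces it, or when the
// cause would not also start a concurrent cycle and there are no regions left
// for allocation.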
2021 if (g1_policy()->force_upgrade_to_full()) {
2022 return true;
2023 } else if (should_do_concurrent_full_gc(_gc_cause)) {
2024 return false;
2025 } else if (has_regions_left_for_allocation()) {
2026 return false;
2027 } else {
2028 return true;
2029 }
2030 }
2031
2032 #ifndef PRODUCT
2033 void G1CollectedHeap::allocate_dummy_regions() {
2034 // Let's fill up most of the region
2035 size_t word_size = HeapRegion::GrainWords - 1024;
2036 // And as a result the region we'll allocate will be humongous.
2037 guarantee(is_humongous(word_size), "sanity");
2038
2039 // _filler_array_max_size is set to the humongous object threshold,
2040 // but we temporarily change it so that CollectedHeap::fill_with_object() can be used.
2041 SizeTFlagSetting fs(_filler_array_max_size, word_size);
2129 uint old_marking_count_before;
2130 uint full_gc_count_before;
2131
2132 {
2133 MutexLocker ml(Heap_lock);
2134
2135 // Read the GC count while holding the Heap_lock
2136 gc_count_before = total_collections();
2137 full_gc_count_before = total_full_collections();
2138 old_marking_count_before = _old_marking_cycles_started;
2139 }
2140
2141 if (should_do_concurrent_full_gc(cause)) {
2142 // Schedule an initial-mark evacuation pause that will start a
2143 // concurrent cycle. We're setting word_size to 0 which means that
2144 // we are not requesting a post-GC allocation.
2145 VM_G1CollectForAllocation op(0, /* word_size */
2146 gc_count_before,
2147 cause,
2148 true, /* should_initiate_conc_mark */
2149 g1_policy()->max_pause_time_ms());
2150 VMThread::execute(&op);
2151 vmop_succeeded = op.pause_succeeded();
2152 if (!vmop_succeeded && retry_on_vmop_failure) {
2153 if (old_marking_count_before == _old_marking_cycles_started) {
2154 should_retry_vmop = op.should_retry_gc();
2155 } else {
2156 // A Full GC happened while we were trying to schedule the
2157 // concurrent cycle. No point in starting a new cycle given
2158 // that the whole heap was collected anyway.
2159 }
2160
2161 if (should_retry_vmop && GCLocker::is_active_and_needs_gc()) {
2162 GCLocker::stall_until_clear();
2163 }
2164 }
2165 } else {
2166 if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
2167 DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
2168
2169 // Schedule a standard evacuation pause. We're setting word_size
2170 // to 0 which means that we are not requesting a post-GC allocation.
2171 VM_G1CollectForAllocation op(0, /* word_size */
2172 gc_count_before,
2173 cause,
2174 false, /* should_initiate_conc_mark */
2175 g1_policy()->max_pause_time_ms());
2176 VMThread::execute(&op);
2177 vmop_succeeded = op.pause_succeeded();
2178 } else {
2179 // Schedule a Full GC.
2180 VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
2181 VMThread::execute(&op);
2182 vmop_succeeded = op.pause_succeeded();
2183 }
2184 }
2185 } while (should_retry_vmop);
2186 return vmop_succeeded;
2187 }
2188
2189 bool G1CollectedHeap::is_in(const void* p) const {
2190 if (_hrm->reserved().contains(p)) {
2191 // Given that we know that p is in the reserved space,
2192 // heap_region_containing() should successfully
2193 // return the containing region.
2194 HeapRegion* hr = heap_region_containing(p);
2195 return hr->is_in(p);
2257 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
2258 HeapRegion* hr = heap_region_containing(addr);
2259 return hr->block_start(addr);
2260 }
2261
2262 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
2263 HeapRegion* hr = heap_region_containing(addr);
2264 return hr->block_size(addr);
2265 }
2266
2267 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
2268 HeapRegion* hr = heap_region_containing(addr);
2269 return hr->block_is_obj(addr);
2270 }
2271
2272 bool G1CollectedHeap::supports_tlab_allocation() const {
2273 return true;
2274 }
2275
2276 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
2277 return (_g1_policy->young_list_target_length() - _survivor.length()) * HeapRegion::GrainBytes;
2278 }
2279
2280 size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
2281 return _eden.length() * HeapRegion::GrainBytes;
2282 }
2283
2284 // For G1, TLABs should not contain humongous objects, so the maximum TLAB size
2285 // is capped at the humongous object limit.
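// For example (illustrative numbers): with regions of 1M words the humongous
// threshold is 512K words, so TLABs are capped at roughly 512K words, i.e. 4M
// on a 64-bit VM.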
2286 size_t G1CollectedHeap::max_tlab_size() const {
2287 return align_down(_humongous_object_threshold_in_words, MinObjAlignment);
2288 }
2289
2290 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2291 return _allocator->unsafe_max_tlab_alloc();
2292 }
2293
2294 size_t G1CollectedHeap::max_capacity() const {
2295 return _hrm->max_expandable_length() * HeapRegion::GrainBytes;
2296 }
2297
2298 size_t G1CollectedHeap::max_reserved_capacity() const {
2299 return _hrm->max_length() * HeapRegion::GrainBytes;
2300 }
2301
2302 jlong G1CollectedHeap::millis_since_last_gc() {
2303 // See the notes in GenCollectedHeap::millis_since_last_gc()
2304 // for more information about the implementation.
2305 jlong ret_val = (os::javaTimeNanos() / NANOSECS_PER_MILLISEC) -
2306 _g1_policy->collection_pause_end_millis();
2307 if (ret_val < 0) {
2308 log_warning(gc)("millis_since_last_gc() would return : " JLONG_FORMAT
2309 ". returning zero instead.", ret_val);
2310 return 0;
2311 }
2312 return ret_val;
2313 }
2314
2315 void G1CollectedHeap::deduplicate_string(oop str) {
2316 assert(java_lang_String::is_instance(str), "invariant");
2317
2318 if (G1StringDedup::is_enabled()) {
2319 G1StringDedup::deduplicate(str);
2320 }
2321 }
2322
2323 void G1CollectedHeap::prepare_for_verify() {
2324 _verifier->prepare_for_verify();
2325 }
2326
2327 void G1CollectedHeap::verify(VerifyOption vo) {
2328 _verifier->verify(vo);
2329 }
2330
2331 bool G1CollectedHeap::supports_concurrent_phase_control() const {
2332 return true;
2333 }
2334
2335 bool G1CollectedHeap::request_concurrent_phase(const char* phase) {
2336 return _cm_thread->request_concurrent_phase(phase);
2337 }
2338
2339 class PrintRegionClosure: public HeapRegionClosure {
2340 outputStream* _st;
2341 public:
2342 PrintRegionClosure(outputStream* st) : _st(st) {}
2343 bool do_heap_region(HeapRegion* r) {
2344 r->print_on(_st);
2345 return false;
2346 }
2347 };
2348
2349 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
2350 const HeapRegion* hr,
2351 const VerifyOption vo) const {
2352 switch (vo) {
2353 case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
2354 case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
2355 case VerifyOption_G1UseFullMarking: return is_obj_dead_full(obj, hr);
2356 default: ShouldNotReachHere();
2357 }
2358 return false; // keep some compilers happy
2428 _cm->print_worker_threads_on(st);
2429 _cr->print_threads_on(st);
2430 _young_gen_sampling_thread->print_on(st);
2431 if (G1StringDedup::is_enabled()) {
2432 G1StringDedup::print_worker_threads_on(st);
2433 }
2434 }
2435
2436 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
2437 workers()->threads_do(tc);
2438 tc->do_thread(_cm_thread);
2439 _cm->threads_do(tc);
2440 _cr->threads_do(tc);
2441 tc->do_thread(_young_gen_sampling_thread);
2442 if (G1StringDedup::is_enabled()) {
2443 G1StringDedup::threads_do(tc);
2444 }
2445 }
2446
2447 void G1CollectedHeap::print_tracing_info() const {
2448 g1_rem_set()->print_summary_info();
2449 concurrent_mark()->print_summary_info();
2450 }
2451
2452 #ifndef PRODUCT
2453 // Helpful for debugging RSet issues.
2454
2455 class PrintRSetsClosure : public HeapRegionClosure {
2456 private:
2457 const char* _msg;
2458 size_t _occupied_sum;
2459
2460 public:
2461 bool do_heap_region(HeapRegion* r) {
2462 HeapRegionRemSet* hrrs = r->rem_set();
2463 size_t occupied = hrrs->occupied();
2464 _occupied_sum += occupied;
2465
2466 tty->print_cr("Printing RSet for region " HR_FORMAT, HR_FORMAT_PARAMS(r));
2467 if (occupied == 0) {
2468 tty->print_cr(" RSet is empty");
2488 };
2489
2490 void G1CollectedHeap::print_cset_rsets() {
2491 PrintRSetsClosure cl("Printing CSet RSets");
2492 collection_set_iterate(&cl);
2493 }
2494
2495 void G1CollectedHeap::print_all_rsets() {
2496 PrintRSetsClosure cl("Printing All RSets");;
2497 heap_region_iterate(&cl);
2498 }
2499 #endif // PRODUCT
2500
2501 G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
2502
2503 size_t eden_used_bytes = heap()->eden_regions_count() * HeapRegion::GrainBytes;
2504 size_t survivor_used_bytes = heap()->survivor_regions_count() * HeapRegion::GrainBytes;
2505 size_t heap_used = Heap_lock->owned_by_self() ? used() : used_unlocked();
2506
2507 size_t eden_capacity_bytes =
2508 (g1_policy()->young_list_target_length() * HeapRegion::GrainBytes) - survivor_used_bytes;
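// For example (illustrative numbers): with a young target of 100 regions of 8M
// each and 10 survivor regions in use, eden_capacity_bytes = 800M - 80M = 720M.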
2509
2510 VirtualSpaceSummary heap_summary = create_heap_space_summary();
2511 return G1HeapSummary(heap_summary, heap_used, eden_used_bytes,
2512 eden_capacity_bytes, survivor_used_bytes, num_regions());
2513 }
2514
2515 G1EvacSummary G1CollectedHeap::create_g1_evac_summary(G1EvacStats* stats) {
2516 return G1EvacSummary(stats->allocated(), stats->wasted(), stats->undo_wasted(),
2517 stats->unused(), stats->used(), stats->region_end_waste(),
2518 stats->regions_filled(), stats->direct_allocated(),
2519 stats->failure_used(), stats->failure_waste());
2520 }
2521
2522 void G1CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
2523 const G1HeapSummary& heap_summary = create_g1_heap_summary();
2524 gc_tracer->report_gc_heap_summary(when, heap_summary);
2525
2526 const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
2527 gc_tracer->report_metaspace_summary(when, metaspace_summary);
2528 }
2529
2530 G1CollectedHeap* G1CollectedHeap::heap() {
2531 CollectedHeap* heap = Universe::heap();
2532 assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
2533 assert(heap->kind() == CollectedHeap::G1, "Invalid name");
2534 return (G1CollectedHeap*)heap;
2535 }
2536
2537 void G1CollectedHeap::gc_prologue(bool full) {
2538 // always_do_update_barrier = false;
2539 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
2540
2541 // This summary needs to be printed before incrementing total collections.
2542 g1_rem_set()->print_periodic_summary_info("Before GC RS summary", total_collections());
2543
2544 // Update common counters.
2545 increment_total_collections(full /* full gc */);
2546 if (full) {
2547 increment_old_marking_cycles_started();
2548 }
2549
2550 // Fill TLABs and such
2551 double start = os::elapsedTime();
2552 ensure_parsability(true);
2553 g1_policy()->phase_times()->record_prepare_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
2554 }
2555
2556 void G1CollectedHeap::gc_epilogue(bool full) {
2557 // Update common counters.
2558 if (full) {
2559 // Update the number of full collections that have been completed.
2560 increment_old_marking_cycles_completed(false /* concurrent */);
2561 }
2562
2563 // We are at the end of the GC. Total collections has already been increased.
2564 g1_rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);
2565
2566 // FIXME: what is this about?
2567 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
2568 // is set.
2569 #if COMPILER2_OR_JVMCI
2570 assert(DerivedPointerTable::is_empty(), "derived pointer present");
2571 #endif
2572 // always_do_update_barrier = true;
2573
2574 double start = os::elapsedTime();
2575 resize_all_tlabs();
2576 g1_policy()->phase_times()->record_resize_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
2577
2578 MemoryService::track_memory_usage();
2579 // We have just completed a GC. Update the soft reference
2580 // policy with the new heap occupancy
2581 Universe::update_heap_info_at_gc();
2582 }
2583
2584 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
2585 uint gc_count_before,
2586 bool* succeeded,
2587 GCCause::Cause gc_cause) {
2588 assert_heap_not_locked_and_not_at_safepoint();
2589 VM_G1CollectForAllocation op(word_size,
2590 gc_count_before,
2591 gc_cause,
2592 false, /* should_initiate_conc_mark */
2593 g1_policy()->max_pause_time_ms());
2594 VMThread::execute(&op);
2595
2596 HeapWord* result = op.result();
2597 bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
2598 assert(result == NULL || ret_succeeded,
2599 "the result should be NULL if the VM did not succeed");
2600 *succeeded = ret_succeeded;
2601
2602 assert_heap_not_locked();
2603 return result;
2604 }
2605
2606 void G1CollectedHeap::do_concurrent_mark() {
2607 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2608 if (!_cm_thread->in_progress()) {
2609 _cm_thread->set_started();
2610 CGC_lock->notify();
2611 }
2612 }
2613
2750 // Clear_locked() above sets the state to Empty. However we want to continue
2751 // collecting remembered set entries for humongous regions that were not
2752 // reclaimed.
2753 r->rem_set()->set_state_complete();
2754 }
2755 assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");
2756 }
2757 _total_humongous++;
2758
2759 return false;
2760 }
2761
2762 size_t total_humongous() const { return _total_humongous; }
2763 size_t candidate_humongous() const { return _candidate_humongous; }
2764
2765 void flush_rem_set_entries() { _dcq.flush(); }
2766 };
2767
2768 void G1CollectedHeap::register_humongous_regions_with_cset() {
2769 if (!G1EagerReclaimHumongousObjects) {
2770 g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(0.0, 0, 0);
2771 return;
2772 }
2773 double time = os::elapsed_counter();
2774
2775 // Collect reclaim candidate information and register candidates with cset.
2776 RegisterHumongousWithInCSetFastTestClosure cl;
2777 heap_region_iterate(&cl);
2778
2779 time = ((double)(os::elapsed_counter() - time) / os::elapsed_frequency()) * 1000.0;
2780 g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(time,
2781 cl.total_humongous(),
2782 cl.candidate_humongous());
2783 _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;
2784
2785 // Finally flush all remembered set entries to re-check into the global DCQS.
2786 cl.flush_rem_set_entries();
2787 }
2788
2789 class VerifyRegionRemSetClosure : public HeapRegionClosure {
2790 public:
2791 bool do_heap_region(HeapRegion* hr) {
2792 if (!hr->is_archive() && !hr->is_continues_humongous()) {
2793 hr->verify_rem_set();
2794 }
2795 return false;
2796 }
2797 };
2798
2799 uint G1CollectedHeap::num_task_queues() const {
2800 return _task_queues->size();
2832 void G1CollectedHeap::reset_taskqueue_stats() {
2833 const uint n = num_task_queues();
2834 for (uint i = 0; i < n; ++i) {
2835 task_queue(i)->stats.reset();
2836 }
2837 }
2838 #endif // TASKQUEUE_STATS
2839
2840 void G1CollectedHeap::wait_for_root_region_scanning() {
2841 double scan_wait_start = os::elapsedTime();
2842 // We have to wait until the CM threads finish scanning the
2843 // root regions as it's the only way to ensure that all the
2844 // objects on them have been correctly scanned before we start
2845 // moving them during the GC.
2846 bool waited = _cm->root_regions()->wait_until_scan_finished();
2847 double wait_time_ms = 0.0;
2848 if (waited) {
2849 double scan_wait_end = os::elapsedTime();
2850 wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
2851 }
2852 g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
2853 }
2854
2855 class G1PrintCollectionSetClosure : public HeapRegionClosure {
2856 private:
2857 G1HRPrinter* _hr_printer;
2858 public:
2859 G1PrintCollectionSetClosure(G1HRPrinter* hr_printer) : HeapRegionClosure(), _hr_printer(hr_printer) { }
2860
2861 virtual bool do_heap_region(HeapRegion* r) {
2862 _hr_printer->cset(r);
2863 return false;
2864 }
2865 };
2866
2867 void G1CollectedHeap::start_new_collection_set() {
2868 collection_set()->start_incremental_building();
2869
2870 clear_cset_fast_test();
2871
2872 guarantee(_eden.length() == 0, "eden should have been cleared");
2873 g1_policy()->transfer_survivors_to_cset(survivor());
2874 }
2875
2876 bool
2877 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
2878 assert_at_safepoint_on_vm_thread();
2879 guarantee(!is_gc_active(), "collection is not reentrant");
2880
2881 if (GCLocker::check_active_before_gc()) {
2882 return false;
2883 }
2884
2885 _gc_timer_stw->register_gc_start();
2886
2887 GCIdMark gc_id_mark;
2888 _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
2889
2890 SvcGCMarker sgcm(SvcGCMarker::MINOR);
2891 ResourceMark rm;
2892
2893 g1_policy()->note_gc_start();
2894
2895 wait_for_root_region_scanning();
2896
2897 print_heap_before_gc();
2898 print_heap_regions();
2899 trace_heap_before_gc(_gc_tracer_stw);
2900
2901 _verifier->verify_region_sets_optional();
2902 _verifier->verify_dirty_young_regions();
2903
2904 // We should not be doing initial mark unless the conc mark thread is running
2905 if (!_cm_thread->should_terminate()) {
2906 // This call will decide whether this pause is an initial-mark
2907 // pause. If it is, in_initial_mark_gc() will return true
2908 // for the duration of this pause.
2909 g1_policy()->decide_on_conc_mark_initiation();
2910 }
2911
2912 // We do not allow initial-mark to be piggy-backed on a mixed GC.
2913 assert(!collector_state()->in_initial_mark_gc() ||
2914 collector_state()->in_young_only_phase(), "sanity");
2915
2916 // We also do not allow mixed GCs during marking.
2917 assert(!collector_state()->mark_or_rebuild_in_progress() || collector_state()->in_young_only_phase(), "sanity");
2918
2919 // Record whether this pause is an initial mark. By the time the current
2920 // thread has completed its logging output and it is safe to signal
2921 // the CM thread, the flag's value in the policy will have been reset.
2922 bool should_start_conc_mark = collector_state()->in_initial_mark_gc();
2923
2924 // Inner scope for scope based logging, timers, and stats collection
2925 {
2926 G1EvacuationInfo evacuation_info;
2927
2928 if (collector_state()->in_initial_mark_gc()) {
2929 // We are about to start a marking cycle, so we increment the
3001 // We want to temporarily turn off discovery by the
3002 // CM ref processor, if necessary, and turn it back on
3003 // again later if we do. Using a scoped
3004 // NoRefDiscovery object will do this.
3005 NoRefDiscovery no_cm_discovery(_ref_processor_cm);
3006
3007 // Forget the current alloc region (we might even choose it to be part
3008 // of the collection set!).
3009 _allocator->release_mutator_alloc_region();
3010
3011 // This timing is only used by the ergonomics to handle our pause target.
3012 // It is unclear why this should not include the full pause. We will
3013 // investigate this in CR 7178365.
3014 //
3015 // Preserving the old comment here if that helps the investigation:
3016 //
3017 // The elapsed time induced by the start time below deliberately elides
3018 // the possible verification above.
3019 double sample_start_time_sec = os::elapsedTime();
3020
3021 g1_policy()->record_collection_pause_start(sample_start_time_sec);
3022
3023 if (collector_state()->in_initial_mark_gc()) {
3024 concurrent_mark()->pre_initial_mark();
3025 }
3026
3027 g1_policy()->finalize_collection_set(target_pause_time_ms, &_survivor);
3028
3029 evacuation_info.set_collectionset_regions(collection_set()->region_length());
3030
3031 register_humongous_regions_with_cset();
3032
3033 assert(_verifier->check_cset_fast_test(), "Inconsistency in the InCSetState table.");
3034
3035 // We call this after finalize_collection_set() to
3036 // ensure that the CSet has been finalized.
3037 _cm->verify_no_cset_oops();
3038
3039 if (_hr_printer.is_active()) {
3040 G1PrintCollectionSetClosure cl(&_hr_printer);
3041 _collection_set.iterate(&cl);
3042 }
3043
3044 // Initialize the GC alloc regions.
3045 _allocator->init_gc_alloc_regions(evacuation_info);
3046
3047 G1ParScanThreadStateSet per_thread_states(this,
3050 collection_set()->optional_region_length());
3051 pre_evacuate_collection_set();
3052
3053 // Actually do the work...
3054 evacuate_collection_set(&per_thread_states);
3055 evacuate_optional_collection_set(&per_thread_states);
3056
3057 post_evacuate_collection_set(evacuation_info, &per_thread_states);
3058
3059 const size_t* surviving_young_words = per_thread_states.surviving_young_words();
3060 free_collection_set(&_collection_set, evacuation_info, surviving_young_words);
3061
3062 eagerly_reclaim_humongous_regions();
3063
3064 record_obj_copy_mem_stats();
3065 _survivor_evac_stats.adjust_desired_plab_sz();
3066 _old_evac_stats.adjust_desired_plab_sz();
3067
3068 double start = os::elapsedTime();
3069 start_new_collection_set();
3070 g1_policy()->phase_times()->record_start_new_cset_time_ms((os::elapsedTime() - start) * 1000.0);
3071
3072 if (evacuation_failed()) {
3073 double recalculate_used_start = os::elapsedTime();
3074 set_used(recalculate_used());
3075 g1_policy()->phase_times()->record_evac_fail_recalc_used_time((os::elapsedTime() - recalculate_used_start) * 1000.0);
3076
3077 if (_archive_allocator != NULL) {
3078 _archive_allocator->clear_used();
3079 }
3080 for (uint i = 0; i < ParallelGCThreads; i++) {
3081 if (_evacuation_failed_info_array[i].has_failed()) {
3082 _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
3083 }
3084 }
3085 } else {
3086 // The "used" of the the collection set have already been subtracted
3087 // when they were freed. Add in the bytes evacuated.
3088 increase_used(g1_policy()->bytes_copied_during_gc());
3089 }
3090
3091 if (collector_state()->in_initial_mark_gc()) {
3092 // We have to do this before we notify the CM threads that
3093 // they can start working to make sure that all the
3094 // appropriate initialization is done on the CM object.
3095 concurrent_mark()->post_initial_mark();
3096 // Note that we don't actually trigger the CM thread at
3097 // this point. We do that later when we're sure that
3098 // the current thread has completed its logging output.
3099 }
3100
3101 allocate_dummy_regions();
3102
3103 _allocator->init_mutator_alloc_region();
3104
3105 {
3106 size_t expand_bytes = _heap_sizing_policy->expansion_amount();
3107 if (expand_bytes > 0) {
3108 size_t bytes_before = capacity();
3109 // No need for ergo logging here;
3110 // expansion_amount() does this when it returns a value > 0.
3111 double expand_ms;
3112 if (!expand(expand_bytes, _workers, &expand_ms)) {
3113 // We failed to expand the heap. Cannot do anything about it.
3114 }
3115 g1_policy()->phase_times()->record_expand_heap_time(expand_ms);
3116 }
3117 }
3118
3119 // We redo the verification, but now with respect to the new CSet which
3120 // has just been initialized after the previous CSet was freed.
3121 _cm->verify_no_cset_oops();
3122
3123 // This timing is only used by the ergonomics to handle our pause target.
3124 // It is unclear why this should not include the full pause. We will
3125 // investigate this in CR 7178365.
3126 double sample_end_time_sec = os::elapsedTime();
3127 double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
3128 size_t total_cards_scanned = g1_policy()->phase_times()->sum_thread_work_items(G1GCPhaseTimes::ScanRS, G1GCPhaseTimes::ScanRSScannedCards);
3129 g1_policy()->record_collection_pause_end(pause_time_ms, total_cards_scanned, heap_used_bytes_before_gc);
3130
3131 evacuation_info.set_collectionset_used_before(collection_set()->bytes_used_before());
3132 evacuation_info.set_bytes_copied(g1_policy()->bytes_copied_during_gc());
3133
3134 if (VerifyRememberedSets) {
3135 log_info(gc, verify)("[Verifying RemSets after GC]");
3136 VerifyRegionRemSetClosure v_cl;
3137 heap_region_iterate(&v_cl);
3138 }
3139
3140 _verifier->verify_after_gc(verify_type);
3141 _verifier->check_bitmaps("GC End");
3142
3143 assert(!_ref_processor_stw->discovery_enabled(), "Postcondition");
3144 _ref_processor_stw->verify_no_references_recorded();
3145
3146 // CM reference discovery will be re-enabled if necessary.
3147 }
3148
3149 #ifdef TRACESPINNING
3150 ParallelTaskTerminator::print_termination_counts();
3151 #endif
3152
3153 gc_epilogue(false);
3154 }
3155
3156 // Print the remainder of the GC log output.
3157 if (evacuation_failed()) {
3158 log_info(gc)("To-space exhausted");
3159 }
3160
3161 g1_policy()->print_phases();
3162 heap_transition.print();
3163
3164 // It is not yet safe to tell the concurrent mark thread to
3165 // start, as we have some optional output below. We don't want the
3166 // output from the concurrent mark thread interfering with this
3167 // logging output either.
3168
3169 _hrm->verify_optional();
3170 _verifier->verify_region_sets_optional();
3171
3172 TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
3173 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
3174
3175 print_heap_after_gc();
3176 print_heap_regions();
3177 trace_heap_after_gc(_gc_tracer_stw);
3178
3179 // We must call G1MonitoringSupport::update_sizes() in the same scoping level
3180 // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
3181 // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
3182 // before any GC notifications are raised.
3183 g1mm()->update_sizes();
3184
3185 _gc_tracer_stw->report_evacuation_info(&evacuation_info);
3186 _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
3187 _gc_timer_stw->register_gc_end();
3188 _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
3189 }
3190 // It should now be safe to tell the concurrent mark thread to start
3191 // without its logging output interfering with the logging output
3192 // that came from the pause.
3193
3194 if (should_start_conc_mark) {
3195 // CAUTION: after the doConcurrentMark() call below,
3196 // the concurrent marking thread(s) could be running
3197 // concurrently with us. Make sure that anything after
3198 // this point does not assume that we are the only GC thread
3199 // running. Note: of course, the actual marking work will
3200 // not start until the safepoint itself is released in
3201 // SuspendibleThreadSet::desynchronize().
3202 do_concurrent_mark();
3203 }
3204
3205 return true;
3206 }
3207
3208 void G1CollectedHeap::remove_self_forwarding_pointers() {
3209 G1ParRemoveSelfForwardPtrsTask rsfp_task;
3210 workers()->run_task(&rsfp_task);
3211 }
3212
3213 void G1CollectedHeap::restore_after_evac_failure() {
3214 double remove_self_forwards_start = os::elapsedTime();
3215
3216 remove_self_forwarding_pointers();
3217 SharedRestorePreservedMarksTaskExecutor task_executor(workers());
3218 _preserved_marks_set.restore(&task_executor);
3219
3220 g1_policy()->phase_times()->record_evac_fail_remove_self_forwards((os::elapsedTime() - remove_self_forwards_start) * 1000.0);
3221 }
3222
3223 void G1CollectedHeap::preserve_mark_during_evac_failure(uint worker_id, oop obj, markOop m) {
3224 if (!_evacuation_failed) {
3225 _evacuation_failed = true;
3226 }
3227
3228 _evacuation_failed_info_array[worker_id].register_copy_failure(obj->size());
3229 _preserved_marks_set.get(worker_id)->push_if_necessary(obj, m);
3230 }
3231
3232 bool G1ParEvacuateFollowersClosure::offer_termination() {
3233 EventGCPhaseParallel event;
3234 G1ParScanThreadState* const pss = par_scan_state();
3235 start_term_time();
3236 const bool res = terminator()->offer_termination();
3237 end_term_time();
3238 event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(G1GCPhaseTimes::Termination));
3239 return res;
3240 }
3258 RefToScanQueueSet* _queues;
3259 G1RootProcessor* _root_processor;
3260 TaskTerminator _terminator;
3261 uint _n_workers;
3262
3263 public:
3264 G1ParTask(G1CollectedHeap* g1h, G1ParScanThreadStateSet* per_thread_states, RefToScanQueueSet *task_queues, G1RootProcessor* root_processor, uint n_workers)
3265 : AbstractGangTask("G1 collection"),
3266 _g1h(g1h),
3267 _pss(per_thread_states),
3268 _queues(task_queues),
3269 _root_processor(root_processor),
3270 _terminator(n_workers, _queues),
3271 _n_workers(n_workers)
3272 {}
3273
3274 void work(uint worker_id) {
3275 if (worker_id >= _n_workers) return; // no work needed this round
3276
3277 double start_sec = os::elapsedTime();
3278 _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, start_sec);
3279
3280 {
3281 ResourceMark rm;
3282 HandleMark hm;
3283
3284 ReferenceProcessor* rp = _g1h->ref_processor_stw();
3285
3286 G1ParScanThreadState* pss = _pss->state_for_worker(worker_id);
3287 pss->set_ref_discoverer(rp);
3288
3289 double start_strong_roots_sec = os::elapsedTime();
3290
3291 _root_processor->evacuate_roots(pss, worker_id);
3292
3293 _g1h->g1_rem_set()->oops_into_collection_set_do(pss, worker_id);
3294
3295 double strong_roots_sec = os::elapsedTime() - start_strong_roots_sec;
3296
3297 double term_sec = 0.0;
3298 size_t evac_term_attempts = 0;
3299 {
3300 double start = os::elapsedTime();
3301 G1ParEvacuateFollowersClosure evac(_g1h, pss, _queues, _terminator.terminator(), G1GCPhaseTimes::ObjCopy);
3302 evac.do_void();
3303
3304 evac_term_attempts = evac.term_attempts();
3305 term_sec = evac.term_time();
3306 double elapsed_sec = os::elapsedTime() - start;
3307
3308 G1GCPhaseTimes* p = _g1h->g1_policy()->phase_times();
3309 p->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
3310
3311 p->record_or_add_thread_work_item(G1GCPhaseTimes::ObjCopy,
3312 worker_id,
3313 pss->lab_waste_words() * HeapWordSize,
3314 G1GCPhaseTimes::ObjCopyLABWaste);
3315 p->record_or_add_thread_work_item(G1GCPhaseTimes::ObjCopy,
3316 worker_id,
3317 pss->lab_undo_waste_words() * HeapWordSize,
3318 G1GCPhaseTimes::ObjCopyLABUndoWaste);
3319
3320 p->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
3321 p->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, evac_term_attempts);
3322 }
3323
3324 assert(pss->queue_is_empty(), "should be empty");
3325
3326 // Close the inner scope so that the ResourceMark and HandleMark
3327 // destructors are executed here and are included as part of the
3328 // "GC Worker Time".
3329 }
3330 _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime());
3331 }
3332 };
3333
3334 void G1CollectedHeap::complete_cleaning(BoolObjectClosure* is_alive,
3335 bool class_unloading_occurred) {
3336 uint num_workers = workers()->active_workers();
3337 ParallelCleaningTask unlink_task(is_alive, num_workers, class_unloading_occurred, false);
3338 workers()->run_task(&unlink_task);
3339 }
3340
3341 // Clean string dedup data structures.
3342 // Ideally we would prefer to use a StringDedupCleaningTask here, but we want to
3343 // record the durations of the phases. Hence the almost-copy.
3344 class G1StringDedupCleaningTask : public AbstractGangTask {
3345 BoolObjectClosure* _is_alive;
3346 OopClosure* _keep_alive;
3347 G1GCPhaseTimes* _phase_times;
3348
3349 public:
3350 G1StringDedupCleaningTask(BoolObjectClosure* is_alive,
3375 }
3376 }
3377 };
3378
3379 void G1CollectedHeap::string_dedup_cleaning(BoolObjectClosure* is_alive,
3380 OopClosure* keep_alive,
3381 G1GCPhaseTimes* phase_times) {
3382 G1StringDedupCleaningTask cl(is_alive, keep_alive, phase_times);
3383 workers()->run_task(&cl);
3384 }
3385
3386 class G1RedirtyLoggedCardsTask : public AbstractGangTask {
3387 private:
3388 G1DirtyCardQueueSet* _queue;
3389 G1CollectedHeap* _g1h;
3390 public:
3391 G1RedirtyLoggedCardsTask(G1DirtyCardQueueSet* queue, G1CollectedHeap* g1h) : AbstractGangTask("Redirty Cards"),
3392 _queue(queue), _g1h(g1h) { }
3393
3394 virtual void work(uint worker_id) {
3395 G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();
3396 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::RedirtyCards, worker_id);
3397
3398 RedirtyLoggedCardTableEntryClosure cl(_g1h);
3399 _queue->par_apply_closure_to_all_completed_buffers(&cl);
3400
3401 phase_times->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_dirtied());
3402 }
3403 };
3404
3405 void G1CollectedHeap::redirty_logged_cards() {
3406 double redirty_logged_cards_start = os::elapsedTime();
3407
3408 G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set(), this);
3409 dirty_card_queue_set().reset_for_par_iteration();
3410 workers()->run_task(&redirty_task);
3411
3412 G1DirtyCardQueueSet& dcq = G1BarrierSet::dirty_card_queue_set();
3413 dcq.merge_bufferlists(&dirty_card_queue_set());
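  // The buffers redirtied above are handed back to the global (barrier set)
  // dirty card queue set so that concurrent refinement will process them;
  // after the merge the pause-local queue set must be empty again.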
3414 assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
3415
3416 g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
3417 }
3418
3419 // Weak Reference Processing support
3420
3421 bool G1STWIsAliveClosure::do_object_b(oop p) {
3422 // An object is reachable if it is outside the collection set,
3423 // or is inside and copied.
3424 return !_g1h->is_in_cset(p) || p->is_forwarded();
3425 }
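// In other words: an object outside the collection set is trivially considered
// live for STW reference processing; an object inside the collection set is
// live only if evacuation installed a forwarding pointer, i.e. it was copied.
// A collection-set object that was never forwarded is treated as dead here.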
3426
3427 bool G1STWSubjectToDiscoveryClosure::do_object_b(oop obj) {
3428 assert(obj != NULL, "must not be NULL");
3429 assert(_g1h->is_in_reserved(obj), "Trying to discover obj " PTR_FORMAT " not in heap", p2i(obj));
3430 // The areas the CM and STW ref processors manage must be disjoint. The is_in_cset() below
3431 // may falsely indicate that this is not the case here: however the collection set only
3432 // contains old regions when concurrent mark is not running.
3433 return _g1h->is_in_cset(obj) || _g1h->heap_region_containing(obj)->is_survivor();
3434 }
3435
3436 // Non Copying Keep Alive closure
3629
3630 // Even when parallel reference processing is enabled, the processing
3631 // of JNI refs is always serial and performed by the current thread
3632 // rather than by a worker. The following PSS will be used for processing
3633 // JNI refs.
3634
3635 // Use only a single queue for this PSS.
3636 G1ParScanThreadState* pss = per_thread_states->state_for_worker(0);
3637 pss->set_ref_discoverer(NULL);
3638 assert(pss->queue_is_empty(), "pre-condition");
3639
3640 // Keep alive closure.
3641 G1CopyingKeepAliveClosure keep_alive(this, pss);
3642
3643 // Serial Complete GC closure
3644 G1STWDrainQueueClosure drain_queue(this, pss);
3645
3646 // Setup the soft refs policy...
3647 rp->setup_policy(false);
3648
3649 ReferenceProcessorPhaseTimes* pt = g1_policy()->phase_times()->ref_phase_times();
3650
3651 ReferenceProcessorStats stats;
3652 if (!rp->processing_is_mt()) {
3653 // Serial reference processing...
3654 stats = rp->process_discovered_references(&is_alive,
3655 &keep_alive,
3656 &drain_queue,
3657 NULL,
3658 pt);
3659 } else {
3660 uint no_of_gc_workers = workers()->active_workers();
3661
3662 // Parallel reference processing
3663 assert(no_of_gc_workers <= rp->max_num_queues(),
3664 "Mismatch between the number of GC workers %u and the maximum number of Reference process queues %u",
3665 no_of_gc_workers, rp->max_num_queues());
3666
3667 G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues);
3668 stats = rp->process_discovered_references(&is_alive,
3669 &keep_alive,
3670 &drain_queue,
3671 &par_task_executor,
3672 pt);
3673 }
3674
3675 _gc_tracer_stw->report_gc_reference_stats(stats);
3676
3677 // We have completed copying any necessary live referent objects.
3678 assert(pss->queue_is_empty(), "both queue and overflow should be empty");
3679
3680 make_pending_list_reachable();
3681
3682 rp->verify_no_references_recorded();
3683
3684 double ref_proc_time = os::elapsedTime() - ref_proc_start;
3685 g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
3686 }
3687
3688 void G1CollectedHeap::make_pending_list_reachable() {
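  // During an initial-mark pause the freshly linked reference pending list must
  // stay live for the concurrent cycle this pause starts, so its head is marked
  // in the next bitmap below.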
3689 if (collector_state()->in_initial_mark_gc()) {
3690 oop pll_head = Universe::reference_pending_list();
3691 if (pll_head != NULL) {
3692 // Any valid worker id is fine here as we are in the VM thread and single-threaded.
3693 _cm->mark_in_next_bitmap(0 /* worker_id */, pll_head);
3694 }
3695 }
3696 }
3697
3698 void G1CollectedHeap::merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states) {
3699 double merge_pss_time_start = os::elapsedTime();
3700 per_thread_states->flush();
3701 g1_policy()->phase_times()->record_merge_pss_time_ms((os::elapsedTime() - merge_pss_time_start) * 1000.0);
3702 }
3703
3704 void G1CollectedHeap::pre_evacuate_collection_set() {
3705 _expand_heap_after_alloc_failure = true;
3706 _evacuation_failed = false;
3707
3708 // Disable the hot card cache.
3709 _hot_card_cache->reset_hot_cache_claimed_index();
3710 _hot_card_cache->set_use_cache(false);
3711
3712 g1_rem_set()->prepare_for_oops_into_collection_set_do();
3713 _preserved_marks_set.assert_empty();
3714
3715 G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
3716
3717 // InitialMark needs claim bits to keep track of the marked-through CLDs.
3718 if (collector_state()->in_initial_mark_gc()) {
3719 double start_clear_claimed_marks = os::elapsedTime();
3720
3721 ClassLoaderDataGraph::clear_claimed_marks();
3722
3723 double recorded_clear_claimed_marks_time_ms = (os::elapsedTime() - start_clear_claimed_marks) * 1000.0;
3724 phase_times->record_clear_claimed_marks_time_ms(recorded_clear_claimed_marks_time_ms);
3725 }
3726 }
3727
3728 void G1CollectedHeap::evacuate_collection_set(G1ParScanThreadStateSet* per_thread_states) {
3729 // Should G1EvacuationFailureALot be in effect for this GC?
3730 NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
3731
3732 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
3733
3734 G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
3735
3736 double start_par_time_sec = os::elapsedTime();
3737 double end_par_time_sec;
3738
3739 {
3740 const uint n_workers = workers()->active_workers();
3741 G1RootProcessor root_processor(this, n_workers);
3742 G1ParTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, n_workers);
3743
3744 workers()->run_task(&g1_par_task);
3745 end_par_time_sec = os::elapsedTime();
3746
3747 // Closing the inner scope will execute the destructor
3748 // for the G1RootProcessor object. We record the current
3749 // elapsed time before closing the scope so that time
3750 // taken for the destructor is NOT included in the
3751 // reported parallel time.
3752 }
3753
3754 double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
3773 }
3774
3775 void scan_roots(G1ParScanThreadState* pss, uint worker_id) {
3776 G1EvacuationRootClosures* root_cls = pss->closures();
3777 G1ScanObjsDuringScanRSClosure obj_cl(_g1h, pss);
3778
3779 size_t scanned = 0;
3780 size_t claimed = 0;
3781 size_t skipped = 0;
3782 size_t used_memory = 0;
3783
3784 Ticks start = Ticks::now();
3785 Tickspan copy_time;
3786
3787 for (uint i = _optional->current_index(); i < _optional->current_limit(); i++) {
3788 HeapRegion* hr = _optional->region_at(i);
3789 G1ScanRSForOptionalClosure scan_opt_cl(&obj_cl);
3790 pss->oops_into_optional_region(hr)->oops_do(&scan_opt_cl, root_cls->raw_strong_oops());
3791 copy_time += trim_ticks(pss);
3792
3793 G1ScanRSForRegionClosure scan_rs_cl(_g1h->g1_rem_set()->scan_state(), &obj_cl, pss, G1GCPhaseTimes::OptScanRS, worker_id);
3794 scan_rs_cl.do_heap_region(hr);
3795 copy_time += trim_ticks(pss);
3796 scanned += scan_rs_cl.cards_scanned();
3797 claimed += scan_rs_cl.cards_claimed();
3798 skipped += scan_rs_cl.cards_skipped();
3799
3800 // Chunk lists for this region are no longer needed.
3801 used_memory += pss->oops_into_optional_region(hr)->used_memory();
3802 }
3803
3804 Tickspan scan_time = (Ticks::now() - start) - copy_time;
3805 G1GCPhaseTimes* p = _g1h->g1_policy()->phase_times();
3806 p->record_or_add_time_secs(G1GCPhaseTimes::OptScanRS, worker_id, scan_time.seconds());
3807 p->record_or_add_time_secs(G1GCPhaseTimes::OptObjCopy, worker_id, copy_time.seconds());
3808
3809 p->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_id, scanned, G1GCPhaseTimes::OptCSetScannedCards);
3810 p->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_id, claimed, G1GCPhaseTimes::OptCSetClaimedCards);
3811 p->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_id, skipped, G1GCPhaseTimes::OptCSetSkippedCards);
3812 p->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_id, used_memory, G1GCPhaseTimes::OptCSetUsedMemory);
3813 }
3814
3815 void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
3816 Ticks start = Ticks::now();
3817 G1ParEvacuateFollowersClosure cl(_g1h, pss, _queues, &_terminator, G1GCPhaseTimes::OptObjCopy);
3818 cl.do_void();
3819
3820 Tickspan evac_time = (Ticks::now() - start);
3821 G1GCPhaseTimes* p = _g1h->g1_policy()->phase_times();
3822 p->record_or_add_time_secs(G1GCPhaseTimes::OptObjCopy, worker_id, evac_time.seconds());
3823 assert(pss->trim_ticks().seconds() == 0.0, "Unexpected partial trimming done during optional evacuation");
3824 }
3825
3826 public:
3827 G1EvacuateOptionalRegionTask(G1CollectedHeap* g1h,
3828 G1ParScanThreadStateSet* per_thread_states,
3829 G1OptionalCSet* cset,
3830 RefToScanQueueSet* queues,
3831 uint n_workers) :
3832 AbstractGangTask("G1 Evacuation Optional Region Task"),
3833 _g1h(g1h),
3834 _per_thread_states(per_thread_states),
3835 _optional(cset),
3836 _queues(queues),
3837 _terminator(n_workers, _queues) {
3838 }
3839
3840 void work(uint worker_id) {
3841 ResourceMark rm;
3850 };
3851
3852 void G1CollectedHeap::evacuate_optional_regions(G1ParScanThreadStateSet* per_thread_states, G1OptionalCSet* ocset) {
3853 class G1MarkScope : public MarkScope {};
3854 G1MarkScope code_mark_scope;
3855
3856 G1EvacuateOptionalRegionTask task(this, per_thread_states, ocset, _task_queues, workers()->active_workers());
3857 workers()->run_task(&task);
3858 }
3859
3860 void G1CollectedHeap::evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states) {
3861 G1OptionalCSet optional_cset(&_collection_set, per_thread_states);
3862 if (optional_cset.is_empty()) {
3863 return;
3864 }
3865
3866 if (evacuation_failed()) {
3867 return;
3868 }
3869
3870 G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
3871 const double gc_start_time_ms = phase_times->cur_collection_start_sec() * 1000.0;
3872
3873 double start_time_sec = os::elapsedTime();
3874
3875 do {
3876 double time_used_ms = os::elapsedTime() * 1000.0 - gc_start_time_ms;
3877 double time_left_ms = MaxGCPauseMillis - time_used_ms;
3878
3879 if (time_left_ms < 0) {
3880 log_trace(gc, ergo, cset)("Skipping %u optional regions, pause time exceeded %.3fms", optional_cset.size(), time_used_ms);
3881 break;
3882 }
3883
3884 optional_cset.prepare_evacuation(time_left_ms * _g1_policy->optional_evacuation_fraction());
3885 if (optional_cset.prepare_failed()) {
3886 log_trace(gc, ergo, cset)("Skipping %u optional regions, no regions can be evacuated in %.3fms", optional_cset.size(), time_left_ms);
3887 break;
3888 }
3889
3890 evacuate_optional_regions(per_thread_states, &optional_cset);
3891
3892 optional_cset.complete_evacuation();
3893 if (optional_cset.evacuation_failed()) {
3894 break;
3895 }
3896 } while (!optional_cset.is_empty());
3897
3898 phase_times->record_optional_evacuation((os::elapsedTime() - start_time_sec) * 1000.0);
3899 }
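// Worked example for the loop above (all values illustrative): with
// MaxGCPauseMillis == 200 and 150 ms already used by this pause, time_left_ms
// is 50 ms. If optional_evacuation_fraction() were 0.2, prepare_evacuation()
// would be asked to pick optional regions it expects to handle in about 10 ms.
// The loop then repeats until the budget is exhausted, preparation or
// evacuation fails, or no optional regions remain.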
3900
3901 void G1CollectedHeap::post_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
3902 // Also cleans the temporary duplicate detection information from the card table that was
3903 // used during UpdateRS/ScanRS.
3904 g1_rem_set()->cleanup_after_oops_into_collection_set_do();
3905
3906 // Process any discovered reference objects - we have
3907 // to do this _before_ we retire the GC alloc regions
3908 // as we may have to copy some 'reachable' referent
3909 // objects (and their reachable sub-graphs) that were
3910 // not copied during the pause.
3911 process_discovered_references(per_thread_states);
3912
3913 G1STWIsAliveClosure is_alive(this);
3914 G1KeepAliveClosure keep_alive(this);
3915
3916 WeakProcessor::weak_oops_do(workers(), &is_alive, &keep_alive,
3917 g1_policy()->phase_times()->weak_phase_times());
3918
3919 if (G1StringDedup::is_enabled()) {
3920 double string_dedup_time_ms = os::elapsedTime();
3921
3922 string_dedup_cleaning(&is_alive, &keep_alive, g1_policy()->phase_times());
3923
3924 double string_cleanup_time_ms = (os::elapsedTime() - string_dedup_time_ms) * 1000.0;
3925 g1_policy()->phase_times()->record_string_deduplication_time(string_cleanup_time_ms);
3926 }
3927
3928 if (evacuation_failed()) {
3929 restore_after_evac_failure();
3930
3931 // Reset the G1EvacuationFailureALot counters and flags
3932 // Note: the values are reset only when an actual
3933 // evacuation failure occurs.
3934 NOT_PRODUCT(reset_evacuation_should_fail();)
3935 }
3936
3937 _preserved_marks_set.assert_empty();
3938
3939 _allocator->release_gc_alloc_regions(evacuation_info);
3940
3941 merge_per_thread_state_info(per_thread_states);
3942
3943 // Reset and re-enable the hot card cache.
3944 // Note the counts for the cards in the regions in the
3945 // collection set are reset when the collection set is freed.
3946 _hot_card_cache->reset_hot_cache();
3947 _hot_card_cache->set_use_cache(true);
3948
3949 purge_code_root_memory();
3950
3951 redirty_logged_cards();
3952 #if COMPILER2_OR_JVMCI
3953 double start = os::elapsedTime();
3954 DerivedPointerTable::update_pointers();
3955 g1_policy()->phase_times()->record_derived_pointer_table_update_time((os::elapsedTime() - start) * 1000.0);
3956 #endif
3957 g1_policy()->print_age_table();
3958 }
3959
3960 void G1CollectedHeap::record_obj_copy_mem_stats() {
3961 g1_policy()->add_bytes_allocated_in_old_since_last_gc(_old_evac_stats.allocated() * HeapWordSize);
3962
3963 _gc_tracer_stw->report_evacuation_statistics(create_g1_evac_summary(&_survivor_evac_stats),
3964 create_g1_evac_summary(&_old_evac_stats));
3965 }
3966
3967 void G1CollectedHeap::free_region(HeapRegion* hr,
3968 FreeRegionList* free_list,
3969 bool skip_remset,
3970 bool skip_hot_card_cache,
3971 bool locked) {
3972 assert(!hr->is_free(), "the region should not be free");
3973 assert(!hr->is_empty(), "the region should not be empty");
3974 assert(_hrm->is_available(hr->hrm_index()), "region should be committed");
3975 assert(free_list != NULL, "pre-condition");
3976
3977 if (G1VerifyBitmaps) {
3978 MemRegion mr(hr->bottom(), hr->end());
3979 concurrent_mark()->clear_range_in_prev_bitmap(mr);
3980 }
3981
3982 // Clear the card counts for this region.
3983 // Note: we only need to do this if the region is not young
3984 // (since we don't refine cards in young regions).
3985 if (!skip_hot_card_cache && !hr->is_young()) {
3986 _hot_card_cache->reset_card_counts(hr);
3987 }
3988 hr->hr_clear(skip_remset, true /* clear_space */, locked /* locked */);
3989 _g1_policy->remset_tracker()->update_at_free(hr);
3990 free_list->add_ordered(hr);
3991 }
3992
3993 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
3994 FreeRegionList* free_list) {
3995 assert(hr->is_humongous(), "this is only for humongous regions");
3996 assert(free_list != NULL, "pre-condition");
3997 hr->clear_humongous();
3998 free_region(hr, free_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
3999 }
4000
4001 void G1CollectedHeap::remove_from_old_sets(const uint old_regions_removed,
4002 const uint humongous_regions_removed) {
4003 if (old_regions_removed > 0 || humongous_regions_removed > 0) {
4004 MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
4005 _old_set.bulk_remove(old_regions_removed);
4006 _humongous_set.bulk_remove(humongous_regions_removed);
4007 }
4008
4009 }
4099 size_t used_words = r->marked_bytes() / HeapWordSize;
4100
4101 _failure_used_words += used_words;
4102 _failure_waste_words += HeapRegion::GrainWords - used_words;
4103
4104 g1h->old_set_add(r);
4105 _after_used_bytes += r->used();
4106 }
4107 return false;
4108 }
4109
4110 void complete_work() {
4111 G1CollectedHeap* g1h = G1CollectedHeap::heap();
4112
4113 _evacuation_info->set_regions_freed(_local_free_list.length());
4114 _evacuation_info->increment_collectionset_used_after(_after_used_bytes);
4115
4116 g1h->prepend_to_freelist(&_local_free_list);
4117 g1h->decrement_summary_bytes(_before_used_bytes);
4118
4119 G1Policy* policy = g1h->g1_policy();
4120 policy->add_bytes_allocated_in_old_since_last_gc(_bytes_allocated_in_old_since_last_gc);
4121
4122 g1h->alloc_buffer_stats(InCSetState::Old)->add_failure_used_and_waste(_failure_used_words, _failure_waste_words);
4123 }
4124 };
4125
4126 G1CollectionSet* _collection_set;
4127 G1SerialFreeCollectionSetClosure _cl;
4128 const size_t* _surviving_young_words;
4129
4130 size_t _rs_lengths;
4131
4132 volatile jint _serial_work_claim;
4133
4134 struct WorkItem {
4135 uint region_idx;
4136 bool is_young;
4137 bool evacuation_failed;
4138
4139 WorkItem(HeapRegion* r) {
4174 private:
4175 size_t _cur_idx;
4176 WorkItem* _work_items;
4177 public:
4178 G1PrepareFreeCollectionSetClosure(WorkItem* work_items) : HeapRegionClosure(), _cur_idx(0), _work_items(work_items) { }
4179
4180 virtual bool do_heap_region(HeapRegion* r) {
4181 _work_items[_cur_idx++] = WorkItem(r);
4182 return false;
4183 }
4184 };
4185
4186 void prepare_work() {
4187 G1PrepareFreeCollectionSetClosure cl(_work_items);
4188 _collection_set->iterate(&cl);
4189 }
4190
4191 void complete_work() {
4192 _cl.complete_work();
4193
4194 G1Policy* policy = G1CollectedHeap::heap()->g1_policy();
4195 policy->record_max_rs_lengths(_rs_lengths);
4196 policy->cset_regions_freed();
4197 }
4198 public:
4199 G1FreeCollectionSetTask(G1CollectionSet* collection_set, G1EvacuationInfo* evacuation_info, const size_t* surviving_young_words) :
4200 AbstractGangTask("G1 Free Collection Set"),
4201 _collection_set(collection_set),
4202 _cl(evacuation_info, surviving_young_words),
4203 _surviving_young_words(surviving_young_words),
4204 _rs_lengths(0),
4205 _serial_work_claim(0),
4206 _parallel_work_claim(0),
4207 _num_work_items(collection_set->region_length()),
4208 _work_items(NEW_C_HEAP_ARRAY(WorkItem, _num_work_items, mtGC)) {
4209 prepare_work();
4210 }
4211
4212 ~G1FreeCollectionSetTask() {
4213 complete_work();
4214 FREE_C_HEAP_ARRAY(WorkItem, _work_items);
4215 }
4216
4217 // Chunk size for work distribution. The chosen value has been determined experimentally
4218 // to be a good tradeoff between overhead and achievable parallelism.
4219 static uint chunk_size() { return 32; }
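  // Sketch of how the parallel claim below partitions the work items (indices
  // are examples only): each fetch-and-add of chunk_size() hands the calling
  // worker the half-open chunk [end - 32, end). The first claimer therefore
  // processes items [0, 32), the next [32, 64), and so on, until the claimed
  // index runs past the number of work items.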
4220
4221 virtual void work(uint worker_id) {
4222 G1GCPhaseTimes* timer = G1CollectedHeap::heap()->g1_policy()->phase_times();
4223
4224 // Claim serial work.
4225 if (_serial_work_claim == 0) {
4226 jint value = Atomic::add(1, &_serial_work_claim) - 1;
4227 if (value == 0) {
4228 double serial_time = os::elapsedTime();
4229 do_serial_work();
4230 timer->record_serial_free_cset_time_ms((os::elapsedTime() - serial_time) * 1000.0);
4231 }
4232 }
4233
4234 // Start parallel work.
4235 double young_time = 0.0;
4236 bool has_young_time = false;
4237 double non_young_time = 0.0;
4238 bool has_non_young_time = false;
4239
4240 while (true) {
4241 size_t end = Atomic::add(chunk_size(), &_parallel_work_claim);
4242 size_t cur = end - chunk_size();
4279 }
4280 };
4281
4282 void G1CollectedHeap::free_collection_set(G1CollectionSet* collection_set, G1EvacuationInfo& evacuation_info, const size_t* surviving_young_words) {
4283 _eden.clear();
4284
4285 double free_cset_start_time = os::elapsedTime();
4286
4287 {
4288 uint const num_chunks = MAX2(_collection_set.region_length() / G1FreeCollectionSetTask::chunk_size(), 1U);
4289 uint const num_workers = MIN2(workers()->active_workers(), num_chunks);
4290
4291 G1FreeCollectionSetTask cl(collection_set, &evacuation_info, surviving_young_words);
4292
4293 log_debug(gc, ergo)("Running %s using %u workers for collection set length %u",
4294 cl.name(),
4295 num_workers,
4296 _collection_set.region_length());
4297 workers()->run_task(&cl, num_workers);
4298 }
4299 g1_policy()->phase_times()->record_total_free_cset_time_ms((os::elapsedTime() - free_cset_start_time) * 1000.0);
4300
4301 collection_set->clear();
4302 }
4303
4304 class G1FreeHumongousRegionClosure : public HeapRegionClosure {
4305 private:
4306 FreeRegionList* _free_region_list;
4307 HeapRegionSet* _proxy_set;
4308 uint _humongous_objects_reclaimed;
4309 uint _humongous_regions_reclaimed;
4310 size_t _freed_bytes;
4311 public:
4312
4313 G1FreeHumongousRegionClosure(FreeRegionList* free_region_list) :
4314 _free_region_list(free_region_list), _proxy_set(NULL), _humongous_objects_reclaimed(0), _humongous_regions_reclaimed(0), _freed_bytes(0) {
4315 }
4316
4317 virtual bool do_heap_region(HeapRegion* r) {
4318 if (!r->is_starts_humongous()) {
4319 return false;
4404 }
4405
4406 uint humongous_objects_reclaimed() {
4407 return _humongous_objects_reclaimed;
4408 }
4409
4410 uint humongous_regions_reclaimed() {
4411 return _humongous_regions_reclaimed;
4412 }
4413
4414 size_t bytes_freed() const {
4415 return _freed_bytes;
4416 }
4417 };
4418
4419 void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
4420 assert_at_safepoint_on_vm_thread();
4421
4422 if (!G1EagerReclaimHumongousObjects ||
4423 (!_has_humongous_reclaim_candidates && !log_is_enabled(Debug, gc, humongous))) {
4424 g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);
4425 return;
4426 }
4427
4428 double start_time = os::elapsedTime();
4429
4430 FreeRegionList local_cleanup_list("Local Humongous Cleanup List");
4431
4432 G1FreeHumongousRegionClosure cl(&local_cleanup_list);
4433 heap_region_iterate(&cl);
4434
4435 remove_from_old_sets(0, cl.humongous_regions_reclaimed());
4436
4437 G1HRPrinter* hrp = hr_printer();
4438 if (hrp->is_active()) {
4439 FreeRegionListIterator iter(&local_cleanup_list);
4440 while (iter.more_available()) {
4441 HeapRegion* hr = iter.get_next();
4442 hrp->cleanup(hr);
4443 }
4444 }
4445
4446 prepend_to_freelist(&local_cleanup_list);
4447 decrement_summary_bytes(cl.bytes_freed());
4448
4449 g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0,
4450 cl.humongous_objects_reclaimed());
4451 }
4452
4453 class G1AbandonCollectionSetClosure : public HeapRegionClosure {
4454 public:
4455 virtual bool do_heap_region(HeapRegion* r) {
4456 assert(r->in_collection_set(), "Region %u must have been in collection set", r->hrm_index());
4457 G1CollectedHeap::heap()->clear_in_cset(r);
4458 r->set_young_index_in_cset(-1);
4459 return false;
4460 }
4461 };
4462
4463 void G1CollectedHeap::abandon_collection_set(G1CollectionSet* collection_set) {
4464 G1AbandonCollectionSetClosure cl;
4465 collection_set->iterate(&cl);
4466
4467 collection_set->clear();
4468 collection_set->stop_incremental_building();
4469 }
4470
4471 bool G1CollectedHeap::is_old_gc_alloc_region(HeapRegion* hr) {
4472 return _allocator->is_retained_old_region(hr);
4473 }
4474
4475 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
4476 _eden.add(hr);
4477 _g1_policy->set_region_eden(hr);
4478 }
4479
4480 #ifdef ASSERT
4481
4482 class NoYoungRegionsClosure: public HeapRegionClosure {
4483 private:
4484 bool _success;
4485 public:
4486 NoYoungRegionsClosure() : _success(true) { }
4487 bool do_heap_region(HeapRegion* r) {
4488 if (r->is_young()) {
4489 log_error(gc, verify)("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
4490 p2i(r->bottom()), p2i(r->end()));
4491 _success = false;
4492 }
4493 return false;
4494 }
4495 bool success() { return _success; }
4496 };
4497
4627 set_used(cl.total_used());
4628 if (_archive_allocator != NULL) {
4629 _archive_allocator->clear_used();
4630 }
4631 }
4632 assert(used() == recalculate_used(),
4633 "inconsistent used(), value: " SIZE_FORMAT " recalculated: " SIZE_FORMAT,
4634 used(), recalculate_used());
4635 }
4636
4637 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
4638 HeapRegion* hr = heap_region_containing(p);
4639 return hr->is_in(p);
4640 }
4641
4642 // Methods for the mutator alloc region
4643
4644 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
4645 bool force) {
4646 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
4647 bool should_allocate = g1_policy()->should_allocate_mutator_region();
4648 if (force || should_allocate) {
4649 HeapRegion* new_alloc_region = new_region(word_size,
4650 HeapRegionType::Eden,
4651 false /* do_expand */);
4652 if (new_alloc_region != NULL) {
4653 set_region_short_lived_locked(new_alloc_region);
4654 _hr_printer.alloc(new_alloc_region, !should_allocate);
4655 _verifier->check_bitmaps("Mutator Region Allocation", new_alloc_region);
4656 _g1_policy->remset_tracker()->update_at_allocate(new_alloc_region);
4657 return new_alloc_region;
4658 }
4659 }
4660 return NULL;
4661 }
4662
4663 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
4664 size_t allocated_bytes) {
4665 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
4666 assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");
4667
4668 collection_set()->add_eden_region(alloc_region);
4669 increase_used(allocated_bytes);
4670 _hr_printer.retire(alloc_region);
4671 // We update the eden sizes here, when the region is retired,
4672 // instead of when it's allocated, since this is the point that its
4673 // used space has been recorded in _summary_bytes_used.
4674 g1mm()->update_eden_size();
4675 }
4676
4677 // Methods for the GC alloc regions
4678
4679 bool G1CollectedHeap::has_more_regions(InCSetState dest) {
4680 if (dest.is_old()) {
4681 return true;
4682 } else {
4683 return survivor_regions_count() < g1_policy()->max_survivor_regions();
4684 }
4685 }
4686
4687 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, InCSetState dest) {
4688 assert(FreeList_lock->owned_by_self(), "pre-condition");
4689
4690 if (!has_more_regions(dest)) {
4691 return NULL;
4692 }
4693
4694 HeapRegionType type;
4695 if (dest.is_young()) {
4696 type = HeapRegionType::Survivor;
4697 } else {
4698 type = HeapRegionType::Old;
4699 }
4700
4701 HeapRegion* new_alloc_region = new_region(word_size,
4702 type,
4703 true /* do_expand */);
4704
4705 if (new_alloc_region != NULL) {
4706 if (type.is_survivor()) {
4707 new_alloc_region->set_survivor();
4708 _survivor.add(new_alloc_region);
4709 _verifier->check_bitmaps("Survivor Region Allocation", new_alloc_region);
4710 } else {
4711 new_alloc_region->set_old();
4712 _verifier->check_bitmaps("Old Region Allocation", new_alloc_region);
4713 }
4714 _g1_policy->remset_tracker()->update_at_allocate(new_alloc_region);
4715 _hr_printer.alloc(new_alloc_region);
4716 return new_alloc_region;
4717 }
4718 return NULL;
4719 }
4720
4721 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
4722 size_t allocated_bytes,
4723 InCSetState dest) {
4724 g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
4725 if (dest.is_old()) {
4726 old_set_add(alloc_region);
4727 }
4728
4729 bool const during_im = collector_state()->in_initial_mark_gc();
4730 if (during_im && allocated_bytes > 0) {
4731 _cm->root_regions()->add(alloc_region);
4732 }
4733 _hr_printer.retire(alloc_region);
4734 }
4735
4736 HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
4737 bool expanded = false;
4738 uint index = _hrm->find_highest_free(&expanded);
4739
4740 if (index != G1_NO_HRM_INDEX) {
4741 if (expanded) {
4742 log_debug(gc, ergo, heap)("Attempt heap expansion (requested address range outside heap bounds). region size: " SIZE_FORMAT "B",
4743 HeapRegion::GrainWords * HeapWordSize);
4744 }
4809 HeapRegion* hr = heap_region_containing(obj);
4810 return !hr->is_pinned();
4811 }
4812
4813 void G1CollectedHeap::register_nmethod(nmethod* nm) {
4814 guarantee(nm != NULL, "sanity");
4815 RegisterNMethodOopClosure reg_cl(this, nm);
4816 nm->oops_do(&reg_cl);
4817 }
4818
4819 void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
4820 guarantee(nm != NULL, "sanity");
4821 UnregisterNMethodOopClosure reg_cl(this, nm);
4822 nm->oops_do(&reg_cl, true);
4823 }
4824
4825 void G1CollectedHeap::purge_code_root_memory() {
4826 double purge_start = os::elapsedTime();
4827 G1CodeRootSet::purge();
4828 double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0;
4829 g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms);
4830 }
4831
4832 class RebuildStrongCodeRootClosure: public CodeBlobClosure {
4833 G1CollectedHeap* _g1h;
4834
4835 public:
4836 RebuildStrongCodeRootClosure(G1CollectedHeap* g1h) :
4837 _g1h(g1h) {}
4838
4839 void do_code_blob(CodeBlob* cb) {
4840 nmethod* nm = (cb != NULL) ? cb->as_nmethod_or_null() : NULL;
4841 if (nm == NULL) {
4842 return;
4843 }
4844
4845 if (ScavengeRootsInCode) {
4846 _g1h->register_nmethod(nm);
4847 }
4848 }
4849 };
242 // objects, for improved usage accounting.
243 // How many words we use for filler objects.
244 size_t word_fill_size = word_size_sum - word_size;
245
246 // How many words memory we "waste" which cannot hold a filler object.
247 size_t words_not_fillable = 0;
248
249 if (word_fill_size >= min_fill_size()) {
250 fill_with_objects(obj_top, word_fill_size);
251 } else if (word_fill_size > 0) {
252 // We have space to fill, but we cannot fit an object there.
253 words_not_fillable = word_fill_size;
254 word_fill_size = 0;
255 }
256
257 // We will set up the first region as "starts humongous". This
258 // will also update the BOT covering all the regions to reflect
259 // that there is a single object that starts at the bottom of the
260 // first region.
261 first_hr->set_starts_humongous(obj_top, word_fill_size);
262 _policy->remset_tracker()->update_at_allocate(first_hr);
263 // Then, if there are any, we will set up the "continues
264 // humongous" regions.
265 HeapRegion* hr = NULL;
266 for (uint i = first + 1; i <= last; ++i) {
267 hr = region_at(i);
268 hr->set_continues_humongous(first_hr);
269 _policy->remset_tracker()->update_at_allocate(hr);
270 }
271
272 // Up to this point no concurrent thread would have been able to
273 // do any scanning on any region in this series. All the top
274 // fields still point to bottom, so the intersection between
275 // [bottom,top] and [card_start,card_end] will be empty. Before we
276 // update the top fields, we'll do a storestore to make sure that
277 // no thread sees the update to top before the zeroing of the
278 // object header and the BOT initialization.
279 OrderAccess::storestore();
280
281 // Now, we will update the top fields of the "continues humongous"
282 // regions except the last one.
283 for (uint i = first; i < last; ++i) {
284 hr = region_at(i);
285 hr->set_top(hr->end());
286 }
287
288 hr = region_at(last);
289 // If we cannot fit a filler object, we must set top to the end
339 // Policy: Try only empty regions (i.e. already committed first). Maybe we
340 // are lucky enough to find some.
341 first = _hrm->find_contiguous_only_empty(obj_regions);
342 if (first != G1_NO_HRM_INDEX) {
343 _hrm->allocate_free_regions_starting_at(first, obj_regions);
344 }
345 }
346
347 if (first == G1_NO_HRM_INDEX) {
348 // Policy: We could not find enough regions for the humongous object in the
349 // free list. Look through the heap to find a mix of free and uncommitted regions.
350 // If so, try expansion.
351 first = _hrm->find_contiguous_empty_or_unavailable(obj_regions);
352 if (first != G1_NO_HRM_INDEX) {
353 // We found something. Make sure these regions are committed, i.e. expand
354 // the heap. Alternatively we could do a defragmentation GC.
355 log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B",
356 word_size * HeapWordSize);
357
358 _hrm->expand_at(first, obj_regions, workers());
359 policy()->record_new_heap_size(num_regions());
360
361 #ifdef ASSERT
362 for (uint i = first; i < first + obj_regions; ++i) {
363 HeapRegion* hr = region_at(i);
364 assert(hr->is_free(), "sanity");
365 assert(hr->is_empty(), "sanity");
366 assert(is_on_master_free_list(hr), "sanity");
367 }
368 #endif
369 _hrm->allocate_free_regions_starting_at(first, obj_regions);
370 } else {
371 // Policy: Potentially trigger a defragmentation GC.
372 }
373 }
374
375 HeapWord* result = NULL;
376 if (first != G1_NO_HRM_INDEX) {
377 result = humongous_obj_allocate_initialize_regions(first, obj_regions, word_size);
378 assert(result != NULL, "it should always return a valid result");
379
423
424 // We will loop until a) we manage to successfully perform the
425 // allocation or b) we successfully schedule a collection which
426 // fails to perform the allocation. b) is the only case when we'll
427 // return NULL.
428 HeapWord* result = NULL;
429 for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
430 bool should_try_gc;
431 uint gc_count_before;
432
433 {
434 MutexLockerEx x(Heap_lock);
435 result = _allocator->attempt_allocation_locked(word_size);
436 if (result != NULL) {
437 return result;
438 }
439
440 // If the GCLocker is active and we are bound for a GC, try expanding young gen.
441 // This is different from the case where only GCLocker::needs_gc() is set: here we try
442 // expanding instead of waiting, so that an active GCLocker does not stall us for too long.
443 if (GCLocker::is_active_and_needs_gc() && policy()->can_expand_young_list()) {
444 // No need for an ergo message here, can_expand_young_list() does this when
445 // it returns true.
446 result = _allocator->attempt_allocation_force(word_size);
447 if (result != NULL) {
448 return result;
449 }
450 }
451 // Only try a GC if the GCLocker does not signal the need for a GC. Wait until
452 // the GCLocker-initiated GC has been performed and then retry. This includes
453 // the case when the GCLocker is no longer active but its GC has not been performed yet.
454 should_try_gc = !GCLocker::needs_gc();
455 // Read the GC count while still holding the Heap_lock.
456 gc_count_before = total_collections();
457 }
458
459 if (should_try_gc) {
460 bool succeeded;
461 result = do_collection_pause(word_size, gc_count_before, &succeeded,
462 GCCause::_g1_inc_collection_pause);
463 if (result != NULL) {
844 // The structure of this method has a lot of similarities to
845 // attempt_allocation_slow(). The reason these two were not merged
846 // into a single one is that such a method would require several "if
847 // allocation is not humongous do this, otherwise do that"
848 // conditional paths which would obscure its flow. In fact, an early
849 // version of this code did use a unified method which was harder to
850 // follow and, as a result, it had subtle bugs that were hard to
851 // track down. So keeping these two methods separate allows each to
852 // be more readable. It will be good to keep these two in sync as
853 // much as possible.
854
855 assert_heap_not_locked_and_not_at_safepoint();
856 assert(is_humongous(word_size), "attempt_allocation_humongous() "
857 "should only be called for humongous allocations");
858
859 // Humongous objects can exhaust the heap quickly, so we should check if we
860 // need to start a marking cycle at each humongous object allocation. We do
861 // the check before we do the actual allocation. The reason for doing it
862 // before the allocation is that we avoid having to keep track of the newly
863 // allocated memory while we do a GC.
864 if (policy()->need_to_start_conc_mark("concurrent humongous allocation",
865 word_size)) {
866 collect(GCCause::_g1_humongous_allocation);
867 }
868
869 // We will loop until a) we manage to successfully perform the
870 // allocation or b) we successfully schedule a collection which
871 // fails to perform the allocation. b) is the only case when we'll
872 // return NULL.
873 HeapWord* result = NULL;
874 for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
875 bool should_try_gc;
876 uint gc_count_before;
877
878
879 {
880 MutexLockerEx x(Heap_lock);
881
882 // Given that humongous objects are not allocated in young
883 // regions, we'll first try to do the allocation without doing a
884 // collection hoping that there's enough space in the heap.
885 result = humongous_obj_allocate(word_size);
886 if (result != NULL) {
887 size_t size_in_regions = humongous_obj_size_in_regions(word_size);
888 policy()->add_bytes_allocated_in_old_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
889 return result;
890 }
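      // For example (region size hypothetical): with 4 MB regions, a 10 MB
      // humongous allocation occupies 3 regions, so 12 MB -- the full region
      // footprint rather than the requested size -- is what the policy records
      // as allocated in old above.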
891
892 // Only try a GC if the GCLocker does not signal the need for a GC. Wait until
893 // the GCLocker-initiated GC has been performed and then retry. This includes
894 // the case when the GCLocker is no longer active but its GC has not been performed yet.
895 should_try_gc = !GCLocker::needs_gc();
896 // Read the GC count while still holding the Heap_lock.
897 gc_count_before = total_collections();
898 }
899
900 if (should_try_gc) {
901 bool succeeded;
902 result = do_collection_pause(word_size, gc_count_before, &succeeded,
903 GCCause::_g1_humongous_allocation);
904 if (result != NULL) {
905 assert(succeeded, "only way to get back a non-NULL result");
906 log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
907 Thread::current()->name(), p2i(result));
908 return result;
946 (try_count % QueuedAllocationWarningCount == 0)) {
947 log_warning(gc, alloc)("%s: Retried allocation %u times for " SIZE_FORMAT " words",
948 Thread::current()->name(), try_count, word_size);
949 }
950 }
951
952 ShouldNotReachHere();
953 return NULL;
954 }
955
956 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
957 bool expect_null_mutator_alloc_region) {
958 assert_at_safepoint_on_vm_thread();
959 assert(!_allocator->has_mutator_alloc_region() || !expect_null_mutator_alloc_region,
960 "the current alloc region was unexpectedly found to be non-NULL");
961
962 if (!is_humongous(word_size)) {
963 return _allocator->attempt_allocation_locked(word_size);
964 } else {
965 HeapWord* result = humongous_obj_allocate(word_size);
966 if (result != NULL && policy()->need_to_start_conc_mark("STW humongous allocation")) {
967 collector_state()->set_initiate_conc_mark_if_possible(true);
968 }
969 return result;
970 }
971
972 ShouldNotReachHere();
973 }
974
975 class PostCompactionPrinterClosure: public HeapRegionClosure {
976 private:
977 G1HRPrinter* _hr_printer;
978 public:
979 bool do_heap_region(HeapRegion* hr) {
980 assert(!hr->is_young(), "not expecting to find young regions");
981 _hr_printer->post_compaction(hr);
982 return false;
983 }
984
985 PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
986 : _hr_printer(hr_printer) { }
1346 log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: " SIZE_FORMAT "B expansion amount: " SIZE_FORMAT "B",
1347 expand_bytes, aligned_expand_bytes);
1348
1349 if (is_maximal_no_gc()) {
1350 log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
1351 return false;
1352 }
1353
1354 double expand_heap_start_time_sec = os::elapsedTime();
1355 uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
1356 assert(regions_to_expand > 0, "Must expand by at least one region");
1357
1358 uint expanded_by = _hrm->expand_by(regions_to_expand, pretouch_workers);
1359 if (expand_time_ms != NULL) {
1360 *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
1361 }
1362
1363 if (expanded_by > 0) {
1364 size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
1365 assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1366 policy()->record_new_heap_size(num_regions());
1367 } else {
1368 log_debug(gc, ergo, heap)("Did not expand the heap (heap expansion operation failed)");
1369
1370 // The expansion of the virtual storage space was unsuccessful.
1371 // Let's see if it was because we ran out of swap.
1372 if (G1ExitOnExpansionFailure &&
1373 _hrm->available() >= regions_to_expand) {
1374 // We had head room...
1375 vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
1376 }
1377 }
1378 return regions_to_expand > 0;
1379 }
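// For illustration (sizes hypothetical): with 4 MB regions and
// aligned_expand_bytes == 12 MB, regions_to_expand is 3. If _hrm->expand_by()
// only manages to commit 2 of them, actual_expand_bytes is 8 MB, which still
// satisfies the post-condition and the new heap size is recorded; committing
// none of them logs a failure and, with G1ExitOnExpansionFailure set and head
// room still available, exits with an out-of-memory error.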
1380
1381 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1382 size_t aligned_shrink_bytes =
1383 ReservedSpace::page_align_size_down(shrink_bytes);
1384 aligned_shrink_bytes = align_down(aligned_shrink_bytes,
1385 HeapRegion::GrainBytes);
1386 uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
1387
1388 uint num_regions_removed = _hrm->shrink_by(num_regions_to_remove);
1389 size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
1390
1391
1392 log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",
1393 shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
1394 if (num_regions_removed > 0) {
1395 policy()->record_new_heap_size(num_regions());
1396 } else {
1397 log_debug(gc, ergo, heap)("Did not expand the heap (heap shrinking operation failed)");
1398 }
1399 }
1400
1401 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1402 _verifier->verify_region_sets_optional();
1403
1404 // We should only reach here at the end of a Full GC or during Remark which
1405 // means we should not be holding on to any GC alloc regions. The method
1406 // below will make sure of that and do any remaining clean up.
1407 _allocator->abandon_gc_alloc_regions();
1408
1409 // Instead of tearing down / rebuilding the free lists here, we
1410 // could instead use the remove_all_pending() method on free_list to
1411 // remove only the ones that we need to remove.
1412 tear_down_region_sets(true /* free_list_only */);
1413 shrink_helper(shrink_bytes);
1414 rebuild_region_sets(true /* free_list_only */);
1415
1493 _listener(),
1494 _hrm(NULL),
1495 _allocator(NULL),
1496 _verifier(NULL),
1497 _summary_bytes_used(0),
1498 _archive_allocator(NULL),
1499 _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
1500 _old_evac_stats("Old", OldPLABSize, PLABWeight),
1501 _expand_heap_after_alloc_failure(true),
1502 _g1mm(NULL),
1503 _humongous_reclaim_candidates(),
1504 _has_humongous_reclaim_candidates(false),
1505 _hr_printer(),
1506 _collector_state(),
1507 _old_marking_cycles_started(0),
1508 _old_marking_cycles_completed(0),
1509 _eden(),
1510 _survivor(),
1511 _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1512 _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1513 _policy(G1Policy::create_policy(collector_policy, _gc_timer_stw)),
1514 _heap_sizing_policy(NULL),
1515 _collection_set(this, _policy),
1516 _hot_card_cache(NULL),
1517 _rem_set(NULL),
1518 _dirty_card_queue_set(false),
1519 _cm(NULL),
1520 _cm_thread(NULL),
1521 _cr(NULL),
1522 _task_queues(NULL),
1523 _evacuation_failed(false),
1524 _evacuation_failed_info_array(NULL),
1525 _preserved_marks_set(true /* in_c_heap */),
1526 #ifndef PRODUCT
1527 _evacuation_failure_alot_for_current_gc(false),
1528 _evacuation_failure_alot_gc_number(0),
1529 _evacuation_failure_alot_count(0),
1530 #endif
1531 _ref_processor_stw(NULL),
1532 _is_alive_closure_stw(this),
1533 _is_subject_to_discovery_stw(this),
1534 _ref_processor_cm(NULL),
1535 _is_alive_closure_cm(this),
1536 _is_subject_to_discovery_cm(this),
1537 _in_cset_fast_test() {
1538
1539 _verifier = new G1HeapVerifier(this);
1540
1541 _allocator = new G1Allocator(this);
1542
1543 _heap_sizing_policy = G1HeapSizingPolicy::create(this, _policy->analytics());
1544
1545 _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
1546
1547 // Override the default _filler_array_max_size so that no humongous filler
1548 // objects are created.
1549 _filler_array_max_size = _humongous_object_threshold_in_words;
1550
1551 uint n_queues = ParallelGCThreads;
1552 _task_queues = new RefToScanQueueSet(n_queues);
1553
1554 _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
1555
1556 for (uint i = 0; i < n_queues; i++) {
1557 RefToScanQueue* q = new RefToScanQueue();
1558 q->initialize();
1559 _task_queues->register_queue(i, q);
1560 ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
1561 }
1562
1563 // Initialize the G1EvacuationFailureALot counters and flags.
1620 return JNI_ENOMEM;
1621 }
1622 return JNI_OK;
1623 }
1624
1625 jint G1CollectedHeap::initialize() {
1626 os::enable_vtime();
1627
1628 // Necessary to satisfy locking discipline assertions.
1629
1630 MutexLocker x(Heap_lock);
1631
1632 // While there are no constraints in the GC code that HeapWordSize
1633 // be any particular value, there are multiple other areas in the
1634 // system which believe this to be true (e.g. oop->object_size in some
1635 // cases incorrectly returns the size in wordSize units rather than
1636 // HeapWordSize).
1637 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1638
1639 size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1640 size_t max_byte_size = _collector_policy->heap_reserved_size_bytes();
1641 size_t heap_alignment = collector_policy()->heap_alignment();
1642
1643 // Ensure that the sizes are properly aligned.
1644 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1645 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1646 Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
1647
1648 // Reserve the maximum.
1649
1650 // When compressed oops are enabled, the preferred heap base
1651 // is calculated by subtracting the requested size from the
1652 // 32Gb boundary and using the result as the base address for
1653 // heap reservation. If the requested size is not aligned to
1654 // HeapRegion::GrainBytes (i.e. the alignment that is passed
1655 // into the ReservedHeapSpace constructor) then the actual
1656 // base of the reserved heap may end up differing from the
1657 // address that was requested (i.e. the preferred heap base).
1658 // If this happens then we could end up using a non-optimal
1659 // compressed oops mode.
1660
1721 create_aux_memory_mapper("Block Offset Table",
1722 G1BlockOffsetTable::compute_size(g1_rs.size() / HeapWordSize),
1723 G1BlockOffsetTable::heap_map_factor());
1724
1725 G1RegionToSpaceMapper* cardtable_storage =
1726 create_aux_memory_mapper("Card Table",
1727 G1CardTable::compute_size(g1_rs.size() / HeapWordSize),
1728 G1CardTable::heap_map_factor());
1729
1730 G1RegionToSpaceMapper* card_counts_storage =
1731 create_aux_memory_mapper("Card Counts Table",
1732 G1CardCounts::compute_size(g1_rs.size() / HeapWordSize),
1733 G1CardCounts::heap_map_factor());
1734
1735 size_t bitmap_size = G1CMBitMap::compute_size(g1_rs.size());
1736 G1RegionToSpaceMapper* prev_bitmap_storage =
1737 create_aux_memory_mapper("Prev Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
1738 G1RegionToSpaceMapper* next_bitmap_storage =
1739 create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
1740
1741 _hrm = HeapRegionManager::create_manager(this, _collector_policy);
1742
1743 _hrm->initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
1744 _card_table->initialize(cardtable_storage);
1745 // Do later initialization work for concurrent refinement.
1746 _hot_card_cache->initialize(card_counts_storage);
1747
1748 // 6843694 - ensure that the maximum region index can fit
1749 // in the remembered set structures.
1750 const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
1751 guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
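// For example, if RegionIdx_t were a 16-bit signed type (purely an assumption
// for illustration), max_region_idx would be (1 << 15) - 1 = 32767, i.e. the
// remembered set encoding could address at most 32768 regions.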
1752
1753 // The G1FromCardCache reserves card with value 0 as "invalid", so the heap must not
1754 // start within the first card.
1755 guarantee(g1_rs.base() >= (char*)G1CardTable::card_size, "Java heap must not start within the first card.");
1756 // Also create a G1 rem set.
1757 _rem_set = new G1RemSet(this, _card_table, _hot_card_cache);
1758 _rem_set->initialize(max_reserved_capacity(), max_regions());
1759
1760 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
1761 guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
1762 guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
1763 "too many cards per region");
1764
1765 FreeRegionList::set_unrealistically_long_length(max_expandable_regions() + 1);
1766
1767 _bot = new G1BlockOffsetTable(reserved_region(), bot_storage);
1768
1769 {
1770 HeapWord* start = _hrm->reserved().start();
1771 HeapWord* end = _hrm->reserved().end();
1772 size_t granularity = HeapRegion::GrainBytes;
1773
1774 _in_cset_fast_test.initialize(start, end, granularity);
1775 _humongous_reclaim_candidates.initialize(start, end, granularity);
1776 }
1777
1778 _workers = new WorkGang("GC Thread", ParallelGCThreads,
1782 return JNI_ENOMEM;
1783 }
1784 _workers->initialize_workers();
1785
1786 // Create the G1ConcurrentMark data structure and thread.
1787 // (Must do this late, so that "max_regions" is defined.)
1788 _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
1789 if (_cm == NULL || !_cm->completed_initialization()) {
1790 vm_shutdown_during_initialization("Could not create/initialize G1ConcurrentMark");
1791 return JNI_ENOMEM;
1792 }
1793 _cm_thread = _cm->cm_thread();
1794
1795 // Now expand into the initial heap size.
1796 if (!expand(init_byte_size, _workers)) {
1797 vm_shutdown_during_initialization("Failed to allocate initial heap.");
1798 return JNI_ENOMEM;
1799 }
1800
1801 // Perform any initialization actions delegated to the policy.
1802 policy()->init(this, &_collection_set);
1803
1804 jint ecode = initialize_concurrent_refinement();
1805 if (ecode != JNI_OK) {
1806 return ecode;
1807 }
1808
1809 ecode = initialize_young_gen_sampling_thread();
1810 if (ecode != JNI_OK) {
1811 return ecode;
1812 }
1813
1814 {
1815 G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
1816 dcqs.set_process_completed_buffers_threshold(concurrent_refine()->yellow_zone());
1817 dcqs.set_max_completed_buffers(concurrent_refine()->red_zone());
1818 }
1819
1820 // Here we allocate the dummy HeapRegion that is required by the
1821 // G1AllocRegion class.
1822 HeapRegion* dummy_region = _hrm->get_dummy_region();
1922 false, // Reference discovery is not atomic
1923 &_is_alive_closure_cm, // is alive closure
1924 true); // allow changes to number of processing threads
1925
1926 // STW ref processor
1927 _ref_processor_stw =
1928 new ReferenceProcessor(&_is_subject_to_discovery_stw,
1929 mt_processing, // mt processing
1930 ParallelGCThreads, // degree of mt processing
1931 (ParallelGCThreads > 1), // mt discovery
1932 ParallelGCThreads, // degree of mt discovery
1933 true, // Reference discovery is atomic
1934 &_is_alive_closure_stw, // is alive closure
1935 true); // allow changes to number of processing threads
1936 }
1937
1938 CollectorPolicy* G1CollectedHeap::collector_policy() const {
1939 return _collector_policy;
1940 }
1941
1942 SoftRefPolicy* G1CollectedHeap::soft_ref_policy() {
1943 return &_soft_ref_policy;
1944 }
1945
1946 size_t G1CollectedHeap::capacity() const {
1947 return _hrm->length() * HeapRegion::GrainBytes;
1948 }
1949
1950 size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
1951 return _hrm->total_free_bytes();
1952 }
1953
1954 void G1CollectedHeap::iterate_hcc_closure(G1CardTableEntryClosure* cl, uint worker_i) {
1955 _hot_card_cache->drain(cl, worker_i);
1956 }
1957
1958 void G1CollectedHeap::iterate_dirty_card_closure(G1CardTableEntryClosure* cl, uint worker_i) {
1959 G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
1960 size_t n_completed_buffers = 0;
1961 while (dcqs.apply_closure_during_gc(cl, worker_i)) {
1962 n_completed_buffers++;
1963 }
1964 assert(dcqs.completed_buffers_num() == 0, "Completed buffers exist!");
1965 policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers, G1GCPhaseTimes::UpdateRSProcessedBuffers);
1966 }
1967
1968 // Computes the sum of the storage used by the various regions.
1969 size_t G1CollectedHeap::used() const {
1970 size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
1971 if (_archive_allocator != NULL) {
1972 result += _archive_allocator->used();
1973 }
1974 return result;
1975 }
1976
1977 size_t G1CollectedHeap::used_unlocked() const {
1978 return _summary_bytes_used;
1979 }
1980
1981 class SumUsedClosure: public HeapRegionClosure {
1982 size_t _used;
1983 public:
1984 SumUsedClosure() : _used(0) {}
1985 bool do_heap_region(HeapRegion* r) {
1997
1998 bool G1CollectedHeap::is_user_requested_concurrent_full_gc(GCCause::Cause cause) {
1999 switch (cause) {
2000 case GCCause::_java_lang_system_gc: return ExplicitGCInvokesConcurrent;
2001 case GCCause::_dcmd_gc_run: return ExplicitGCInvokesConcurrent;
2002 case GCCause::_wb_conc_mark: return true;
2003 default : return false;
2004 }
2005 }
2006
2007 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
2008 switch (cause) {
2009 case GCCause::_gc_locker: return GCLockerInvokesConcurrent;
2010 case GCCause::_g1_humongous_allocation: return true;
2011 case GCCause::_g1_periodic_collection: return G1PeriodicGCInvokesConcurrent;
2012 default: return is_user_requested_concurrent_full_gc(cause);
2013 }
2014 }
2015
2016 bool G1CollectedHeap::should_upgrade_to_full_gc(GCCause::Cause cause) {
2017 if (policy()->force_upgrade_to_full()) {
2018 return true;
2019 } else if (should_do_concurrent_full_gc(_gc_cause)) {
2020 return false;
2021 } else if (has_regions_left_for_allocation()) {
2022 return false;
2023 } else {
2024 return true;
2025 }
2026 }
2027
2028 #ifndef PRODUCT
2029 void G1CollectedHeap::allocate_dummy_regions() {
2030 // Let's fill up most of the region
2031 size_t word_size = HeapRegion::GrainWords - 1024;
2032 // And as a result the region we'll allocate will be humongous.
2033 guarantee(is_humongous(word_size), "sanity");
2034
2035 // _filler_array_max_size is set to humongous object threshold
2036 // but temporarily change it to use CollectedHeap::fill_with_object().
2037 SizeTFlagSetting fs(_filler_array_max_size, word_size);
2125 uint old_marking_count_before;
2126 uint full_gc_count_before;
2127
2128 {
2129 MutexLocker ml(Heap_lock);
2130
2131 // Read the GC count while holding the Heap_lock
2132 gc_count_before = total_collections();
2133 full_gc_count_before = total_full_collections();
2134 old_marking_count_before = _old_marking_cycles_started;
2135 }
2136
2137 if (should_do_concurrent_full_gc(cause)) {
2138 // Schedule an initial-mark evacuation pause that will start a
2139 // concurrent cycle. We're setting word_size to 0 which means that
2140 // we are not requesting a post-GC allocation.
2141 VM_G1CollectForAllocation op(0, /* word_size */
2142 gc_count_before,
2143 cause,
2144 true, /* should_initiate_conc_mark */
2145 policy()->max_pause_time_ms());
2146 VMThread::execute(&op);
2147 vmop_succeeded = op.pause_succeeded();
2148 if (!vmop_succeeded && retry_on_vmop_failure) {
2149 if (old_marking_count_before == _old_marking_cycles_started) {
2150 should_retry_vmop = op.should_retry_gc();
2151 } else {
2152 // A Full GC happened while we were trying to schedule the
2153 // concurrent cycle. No point in starting a new cycle given
2154 // that the whole heap was collected anyway.
2155 }
2156
2157 if (should_retry_vmop && GCLocker::is_active_and_needs_gc()) {
2158 GCLocker::stall_until_clear();
2159 }
2160 }
2161 } else {
2162 if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
2163 DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
2164
2165 // Schedule a standard evacuation pause. We're setting word_size
2166 // to 0 which means that we are not requesting a post-GC allocation.
2167 VM_G1CollectForAllocation op(0, /* word_size */
2168 gc_count_before,
2169 cause,
2170 false, /* should_initiate_conc_mark */
2171 policy()->max_pause_time_ms());
2172 VMThread::execute(&op);
2173 vmop_succeeded = op.pause_succeeded();
2174 } else {
2175 // Schedule a Full GC.
2176 VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
2177 VMThread::execute(&op);
2178 vmop_succeeded = op.pause_succeeded();
2179 }
2180 }
2181 } while (should_retry_vmop);
2182 return vmop_succeeded;
2183 }
2184
2185 bool G1CollectedHeap::is_in(const void* p) const {
2186 if (_hrm->reserved().contains(p)) {
2187 // Given that we know that p is in the reserved space,
2188 // heap_region_containing() should successfully
2189 // return the containing region.
2190 HeapRegion* hr = heap_region_containing(p);
2191 return hr->is_in(p);
2253 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
2254 HeapRegion* hr = heap_region_containing(addr);
2255 return hr->block_start(addr);
2256 }
2257
2258 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
2259 HeapRegion* hr = heap_region_containing(addr);
2260 return hr->block_size(addr);
2261 }
2262
2263 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
2264 HeapRegion* hr = heap_region_containing(addr);
2265 return hr->block_is_obj(addr);
2266 }
2267
2268 bool G1CollectedHeap::supports_tlab_allocation() const {
2269 return true;
2270 }
2271
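// TLABs are carved out of eden only, so the capacity reported here is the
// young generation target minus the regions already taken by survivors.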
2272 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
2273 return (_policy->young_list_target_length() - _survivor.length()) * HeapRegion::GrainBytes;
2274 }
2275
2276 size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
2277 return _eden.length() * HeapRegion::GrainBytes;
2278 }
2279
2280 // For G1, TLABs should not contain humongous objects, so the maximum TLAB size
2281 // must be equal to the humongous object limit.
2282 size_t G1CollectedHeap::max_tlab_size() const {
2283 return align_down(_humongous_object_threshold_in_words, MinObjAlignment);
2284 }
2285
2286 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2287 return _allocator->unsafe_max_tlab_alloc();
2288 }
2289
2290 size_t G1CollectedHeap::max_capacity() const {
2291 return _hrm->max_expandable_length() * HeapRegion::GrainBytes;
2292 }
2293
2294 size_t G1CollectedHeap::max_reserved_capacity() const {
2295 return _hrm->max_length() * HeapRegion::GrainBytes;
2296 }
2297
2298 jlong G1CollectedHeap::millis_since_last_gc() {
2299 // See the notes in GenCollectedHeap::millis_since_last_gc()
2300 // for more information about the implementation.
2301 jlong ret_val = (os::javaTimeNanos() / NANOSECS_PER_MILLISEC) -
2302 _policy->collection_pause_end_millis();
2303 if (ret_val < 0) {
2304 log_warning(gc)("millis_since_last_gc() would return : " JLONG_FORMAT
2305 ". returning zero instead.", ret_val);
2306 return 0;
2307 }
2308 return ret_val;
2309 }
2310
2311 void G1CollectedHeap::deduplicate_string(oop str) {
2312 assert(java_lang_String::is_instance(str), "invariant");
2313
2314 if (G1StringDedup::is_enabled()) {
2315 G1StringDedup::deduplicate(str);
2316 }
2317 }
2318
2319 void G1CollectedHeap::prepare_for_verify() {
2320 _verifier->prepare_for_verify();
2321 }
2322
2323 void G1CollectedHeap::verify(VerifyOption vo) {
2324 _verifier->verify(vo);
2325 }
2326
2327 bool G1CollectedHeap::supports_concurrent_phase_control() const {
2328 return true;
2329 }
2330
2331 bool G1CollectedHeap::request_concurrent_phase(const char* phase) {
2332 return _cm_thread->request_concurrent_phase(phase);
2333 }
2334
2335 bool G1CollectedHeap::is_heap_heterogeneous() const {
2336 return _collector_policy->is_heap_heterogeneous();
2337 }
2338
2339 class PrintRegionClosure: public HeapRegionClosure {
2340 outputStream* _st;
2341 public:
2342 PrintRegionClosure(outputStream* st) : _st(st) {}
2343 bool do_heap_region(HeapRegion* r) {
2344 r->print_on(_st);
2345 return false;
2346 }
2347 };
2348
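// Map the verification option to the corresponding liveness query, based on
// the previous marking, the next marking, or full-GC marking information.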
2349 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
2350 const HeapRegion* hr,
2351 const VerifyOption vo) const {
2352 switch (vo) {
2353 case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
2354 case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
2355 case VerifyOption_G1UseFullMarking: return is_obj_dead_full(obj, hr);
2356 default: ShouldNotReachHere();
2357 }
2358 return false; // keep some compilers happy
2428 _cm->print_worker_threads_on(st);
2429 _cr->print_threads_on(st);
2430 _young_gen_sampling_thread->print_on(st);
2431 if (G1StringDedup::is_enabled()) {
2432 G1StringDedup::print_worker_threads_on(st);
2433 }
2434 }
2435
2436 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
2437 workers()->threads_do(tc);
2438 tc->do_thread(_cm_thread);
2439 _cm->threads_do(tc);
2440 _cr->threads_do(tc);
2441 tc->do_thread(_young_gen_sampling_thread);
2442 if (G1StringDedup::is_enabled()) {
2443 G1StringDedup::threads_do(tc);
2444 }
2445 }
2446
2447 void G1CollectedHeap::print_tracing_info() const {
2448 rem_set()->print_summary_info();
2449 concurrent_mark()->print_summary_info();
2450 }
2451
2452 #ifndef PRODUCT
2453 // Helpful for debugging RSet issues.
2454
2455 class PrintRSetsClosure : public HeapRegionClosure {
2456 private:
2457 const char* _msg;
2458 size_t _occupied_sum;
2459
2460 public:
2461 bool do_heap_region(HeapRegion* r) {
2462 HeapRegionRemSet* hrrs = r->rem_set();
2463 size_t occupied = hrrs->occupied();
2464 _occupied_sum += occupied;
2465
2466 tty->print_cr("Printing RSet for region " HR_FORMAT, HR_FORMAT_PARAMS(r));
2467 if (occupied == 0) {
2468 tty->print_cr(" RSet is empty");
2488 };
2489
2490 void G1CollectedHeap::print_cset_rsets() {
2491 PrintRSetsClosure cl("Printing CSet RSets");
2492 collection_set_iterate(&cl);
2493 }
2494
2495 void G1CollectedHeap::print_all_rsets() {
2496 PrintRSetsClosure cl("Printing All RSets");
2497 heap_region_iterate(&cl);
2498 }
2499 #endif // PRODUCT
2500
2501 G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
2502
2503 size_t eden_used_bytes = heap()->eden_regions_count() * HeapRegion::GrainBytes;
2504 size_t survivor_used_bytes = heap()->survivor_regions_count() * HeapRegion::GrainBytes;
2505 size_t heap_used = Heap_lock->owned_by_self() ? used() : used_unlocked();
2506
2507 size_t eden_capacity_bytes =
2508 (policy()->young_list_target_length() * HeapRegion::GrainBytes) - survivor_used_bytes;
2509
2510 VirtualSpaceSummary heap_summary = create_heap_space_summary();
2511 return G1HeapSummary(heap_summary, heap_used, eden_used_bytes,
2512 eden_capacity_bytes, survivor_used_bytes, num_regions());
2513 }
2514
2515 G1EvacSummary G1CollectedHeap::create_g1_evac_summary(G1EvacStats* stats) {
2516 return G1EvacSummary(stats->allocated(), stats->wasted(), stats->undo_wasted(),
2517 stats->unused(), stats->used(), stats->region_end_waste(),
2518 stats->regions_filled(), stats->direct_allocated(),
2519 stats->failure_used(), stats->failure_waste());
2520 }
2521
2522 void G1CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
2523 const G1HeapSummary& heap_summary = create_g1_heap_summary();
2524 gc_tracer->report_gc_heap_summary(when, heap_summary);
2525
2526 const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
2527 gc_tracer->report_metaspace_summary(when, metaspace_summary);
2528 }
2529
2530 G1CollectedHeap* G1CollectedHeap::heap() {
2531 CollectedHeap* heap = Universe::heap();
2532 assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
2533 assert(heap->kind() == CollectedHeap::G1, "Invalid name");
2534 return (G1CollectedHeap*)heap;
2535 }
2536
2537 void G1CollectedHeap::gc_prologue(bool full) {
2538 // always_do_update_barrier = false;
2539 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
2540
2541 // This summary needs to be printed before incrementing total collections.
2542 rem_set()->print_periodic_summary_info("Before GC RS summary", total_collections());
2543
2544 // Update common counters.
2545 increment_total_collections(full /* full gc */);
2546 if (full) {
2547 increment_old_marking_cycles_started();
2548 }
2549
2550 // Fill TLABs and such
2551 double start = os::elapsedTime();
2552 ensure_parsability(true);
2553 policy()->phase_times()->record_prepare_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
2554 }
2555
2556 void G1CollectedHeap::gc_epilogue(bool full) {
2557 // Update common counters.
2558 if (full) {
2559 // Update the number of full collections that have been completed.
2560 increment_old_marking_cycles_completed(false /* concurrent */);
2561 }
2562
2563 // We are at the end of the GC. Total collections has already been increased.
2564 rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);
2565
2566 // FIXME: what is this about?
2567 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
2568 // is set.
2569 #if COMPILER2_OR_JVMCI
2570 assert(DerivedPointerTable::is_empty(), "derived pointer present");
2571 #endif
2572 // always_do_update_barrier = true;
2573
2574 double start = os::elapsedTime();
2575 resize_all_tlabs();
2576 policy()->phase_times()->record_resize_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
2577
2578 MemoryService::track_memory_usage();
2579 // We have just completed a GC. Update the soft reference
2580 // policy with the new heap occupancy
2581 Universe::update_heap_info_at_gc();
2582 }
2583
2584 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
2585 uint gc_count_before,
2586 bool* succeeded,
2587 GCCause::Cause gc_cause) {
2588 assert_heap_not_locked_and_not_at_safepoint();
2589 VM_G1CollectForAllocation op(word_size,
2590 gc_count_before,
2591 gc_cause,
2592 false, /* should_initiate_conc_mark */
2593 policy()->max_pause_time_ms());
2594 VMThread::execute(&op);
2595
2596 HeapWord* result = op.result();
2597 bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
2598 assert(result == NULL || ret_succeeded,
2599 "the result should be NULL if the VM did not succeed");
2600 *succeeded = ret_succeeded;
2601
2602 assert_heap_not_locked();
2603 return result;
2604 }
2605
2606 void G1CollectedHeap::do_concurrent_mark() {
2607 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2608 if (!_cm_thread->in_progress()) {
2609 _cm_thread->set_started();
2610 CGC_lock->notify();
2611 }
2612 }
2613
2750 // Clear_locked() above sets the state to Empty. However, we want to continue
2751 // collecting remembered set entries for humongous regions that were not
2752 // reclaimed.
2753 r->rem_set()->set_state_complete();
2754 }
2755 assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");
2756 }
2757 _total_humongous++;
2758
2759 return false;
2760 }
2761
2762 size_t total_humongous() const { return _total_humongous; }
2763 size_t candidate_humongous() const { return _candidate_humongous; }
2764
2765 void flush_rem_set_entries() { _dcq.flush(); }
2766 };
2767
2768 void G1CollectedHeap::register_humongous_regions_with_cset() {
2769 if (!G1EagerReclaimHumongousObjects) {
2770 policy()->phase_times()->record_fast_reclaim_humongous_stats(0.0, 0, 0);
2771 return;
2772 }
2773 double time = os::elapsed_counter();
2774
2775 // Collect reclaim candidate information and register candidates with cset.
2776 RegisterHumongousWithInCSetFastTestClosure cl;
2777 heap_region_iterate(&cl);
2778
2779 time = ((double)(os::elapsed_counter() - time) / os::elapsed_frequency()) * 1000.0;
2780 policy()->phase_times()->record_fast_reclaim_humongous_stats(time,
2781 cl.total_humongous(),
2782 cl.candidate_humongous());
2783 _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;
2784
2785 // Finally, flush all remembered set entries into the global DCQS so that they are re-examined.
2786 cl.flush_rem_set_entries();
2787 }
2788
2789 class VerifyRegionRemSetClosure : public HeapRegionClosure {
2790 public:
2791 bool do_heap_region(HeapRegion* hr) {
2792 if (!hr->is_archive() && !hr->is_continues_humongous()) {
2793 hr->verify_rem_set();
2794 }
2795 return false;
2796 }
2797 };
2798
2799 uint G1CollectedHeap::num_task_queues() const {
2800 return _task_queues->size();
2832 void G1CollectedHeap::reset_taskqueue_stats() {
2833 const uint n = num_task_queues();
2834 for (uint i = 0; i < n; ++i) {
2835 task_queue(i)->stats.reset();
2836 }
2837 }
2838 #endif // TASKQUEUE_STATS
2839
2840 void G1CollectedHeap::wait_for_root_region_scanning() {
2841 double scan_wait_start = os::elapsedTime();
2842 // We have to wait until the CM threads finish scanning the
2843 // root regions as it's the only way to ensure that all the
2844 // objects on them have been correctly scanned before we start
2845 // moving them during the GC.
2846 bool waited = _cm->root_regions()->wait_until_scan_finished();
2847 double wait_time_ms = 0.0;
2848 if (waited) {
2849 double scan_wait_end = os::elapsedTime();
2850 wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
2851 }
2852 policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
2853 }
2854
2855 class G1PrintCollectionSetClosure : public HeapRegionClosure {
2856 private:
2857 G1HRPrinter* _hr_printer;
2858 public:
2859 G1PrintCollectionSetClosure(G1HRPrinter* hr_printer) : HeapRegionClosure(), _hr_printer(hr_printer) { }
2860
2861 virtual bool do_heap_region(HeapRegion* r) {
2862 _hr_printer->cset(r);
2863 return false;
2864 }
2865 };
2866
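// Begin building the next incremental collection set: clear the in-cset
// fast-test table and seed the young collection set with the current survivors.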
2867 void G1CollectedHeap::start_new_collection_set() {
2868 collection_set()->start_incremental_building();
2869
2870 clear_cset_fast_test();
2871
2872 guarantee(_eden.length() == 0, "eden should have been cleared");
2873 policy()->transfer_survivors_to_cset(survivor());
2874 }
2875
2876 bool
2877 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
2878 assert_at_safepoint_on_vm_thread();
2879 guarantee(!is_gc_active(), "collection is not reentrant");
2880
2881 if (GCLocker::check_active_before_gc()) {
2882 return false;
2883 }
2884
2885 _gc_timer_stw->register_gc_start();
2886
2887 GCIdMark gc_id_mark;
2888 _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
2889
2890 SvcGCMarker sgcm(SvcGCMarker::MINOR);
2891 ResourceMark rm;
2892
2893 policy()->note_gc_start();
2894
2895 wait_for_root_region_scanning();
2896
2897 print_heap_before_gc();
2898 print_heap_regions();
2899 trace_heap_before_gc(_gc_tracer_stw);
2900
2901 _verifier->verify_region_sets_optional();
2902 _verifier->verify_dirty_young_regions();
2903
2904 // We should not be doing initial mark unless the conc mark thread is running
2905 if (!_cm_thread->should_terminate()) {
2906 // This call will decide whether this pause is an initial-mark
2907 // pause. If it is, in_initial_mark_gc() will return true
2908 // for the duration of this pause.
2909 policy()->decide_on_conc_mark_initiation();
2910 }
2911
2912 // We do not allow initial-mark to be piggy-backed on a mixed GC.
2913 assert(!collector_state()->in_initial_mark_gc() ||
2914 collector_state()->in_young_only_phase(), "sanity");
2915
2916 // We also do not allow mixed GCs during marking.
2917 assert(!collector_state()->mark_or_rebuild_in_progress() || collector_state()->in_young_only_phase(), "sanity");
2918
2919 // Record whether this pause is an initial mark; by the time the current
2920 // thread has completed its logging output and it is safe to signal
2921 // the CM thread, the flag's value in the policy will have been reset.
2922 bool should_start_conc_mark = collector_state()->in_initial_mark_gc();
2923
2924 // Inner scope for scope based logging, timers, and stats collection
2925 {
2926 G1EvacuationInfo evacuation_info;
2927
2928 if (collector_state()->in_initial_mark_gc()) {
2929 // We are about to start a marking cycle, so we increment the
3001 // We want to temporarily turn off discovery by the
3002 // CM ref processor, if necessary, and turn it back
3003 // on again later if we do. Using a scoped
3004 // NoRefDiscovery object will do this.
3005 NoRefDiscovery no_cm_discovery(_ref_processor_cm);
3006
3007 // Forget the current alloc region (we might even choose it to be part
3008 // of the collection set!).
3009 _allocator->release_mutator_alloc_region();
3010
3011 // This timing is only used by the ergonomics to handle our pause target.
3012 // It is unclear why this should not include the full pause. We will
3013 // investigate this in CR 7178365.
3014 //
3015 // Preserving the old comment here if that helps the investigation:
3016 //
3017 // The elapsed time induced by the start time below deliberately elides
3018 // the possible verification above.
3019 double sample_start_time_sec = os::elapsedTime();
3020
3021 policy()->record_collection_pause_start(sample_start_time_sec);
3022
3023 if (collector_state()->in_initial_mark_gc()) {
3024 concurrent_mark()->pre_initial_mark();
3025 }
3026
3027 policy()->finalize_collection_set(target_pause_time_ms, &_survivor);
3028
3029 evacuation_info.set_collectionset_regions(collection_set()->region_length());
3030
3031 register_humongous_regions_with_cset();
3032
3033 assert(_verifier->check_cset_fast_test(), "Inconsistency in the InCSetState table.");
3034
3035 // We call this after finalize_collection_set() to
3036 // ensure that the CSet has been finalized.
3037 _cm->verify_no_cset_oops();
3038
3039 if (_hr_printer.is_active()) {
3040 G1PrintCollectionSetClosure cl(&_hr_printer);
3041 _collection_set.iterate(&cl);
3042 }
3043
3044 // Initialize the GC alloc regions.
3045 _allocator->init_gc_alloc_regions(evacuation_info);
3046
3047 G1ParScanThreadStateSet per_thread_states(this,
3050 collection_set()->optional_region_length());
3051 pre_evacuate_collection_set();
3052
3053 // Actually do the work...
3054 evacuate_collection_set(&per_thread_states);
3055 evacuate_optional_collection_set(&per_thread_states);
3056
3057 post_evacuate_collection_set(evacuation_info, &per_thread_states);
3058
3059 const size_t* surviving_young_words = per_thread_states.surviving_young_words();
3060 free_collection_set(&_collection_set, evacuation_info, surviving_young_words);
3061
3062 eagerly_reclaim_humongous_regions();
3063
3064 record_obj_copy_mem_stats();
3065 _survivor_evac_stats.adjust_desired_plab_sz();
3066 _old_evac_stats.adjust_desired_plab_sz();
3067
3068 double start = os::elapsedTime();
3069 start_new_collection_set();
3070 policy()->phase_times()->record_start_new_cset_time_ms((os::elapsedTime() - start) * 1000.0);
3071
3072 if (evacuation_failed()) {
3073 double recalculate_used_start = os::elapsedTime();
3074 set_used(recalculate_used());
3075 policy()->phase_times()->record_evac_fail_recalc_used_time((os::elapsedTime() - recalculate_used_start) * 1000.0);
3076
3077 if (_archive_allocator != NULL) {
3078 _archive_allocator->clear_used();
3079 }
3080 for (uint i = 0; i < ParallelGCThreads; i++) {
3081 if (_evacuation_failed_info_array[i].has_failed()) {
3082 _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
3083 }
3084 }
3085 } else {
3086 // The "used" of the collection set regions has already been subtracted
3087 // when they were freed. Add in the bytes evacuated.
3088 increase_used(policy()->bytes_copied_during_gc());
3089 }
3090
3091 if (collector_state()->in_initial_mark_gc()) {
3092 // We have to do this before we notify the CM threads that
3093 // they can start working to make sure that all the
3094 // appropriate initialization is done on the CM object.
3095 concurrent_mark()->post_initial_mark();
3096 // Note that we don't actually trigger the CM thread at
3097 // this point. We do that later when we're sure that
3098 // the current thread has completed its logging output.
3099 }
3100
3101 allocate_dummy_regions();
3102
3103 _allocator->init_mutator_alloc_region();
3104
3105 {
3106 size_t expand_bytes = _heap_sizing_policy->expansion_amount();
3107 if (expand_bytes > 0) {
3108 size_t bytes_before = capacity();
3109 // No need for ergo logging here;
3110 // expansion_amount() does this when it returns a value > 0.
3111 double expand_ms;
3112 if (!expand(expand_bytes, _workers, &expand_ms)) {
3113 // We failed to expand the heap. Cannot do anything about it.
3114 }
3115 policy()->phase_times()->record_expand_heap_time(expand_ms);
3116 }
3117 }
3118
3119 // We redo the verification, but now with respect to the new CSet, which
3120 // has just been initialized after the previous CSet was freed.
3121 _cm->verify_no_cset_oops();
3122
3123 // This timing is only used by the ergonomics to handle our pause target.
3124 // It is unclear why this should not include the full pause. We will
3125 // investigate this in CR 7178365.
3126 double sample_end_time_sec = os::elapsedTime();
3127 double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
3128 size_t total_cards_scanned = policy()->phase_times()->sum_thread_work_items(G1GCPhaseTimes::ScanRS, G1GCPhaseTimes::ScanRSScannedCards);
3129 policy()->record_collection_pause_end(pause_time_ms, total_cards_scanned, heap_used_bytes_before_gc);
3130
3131 evacuation_info.set_collectionset_used_before(collection_set()->bytes_used_before());
3132 evacuation_info.set_bytes_copied(policy()->bytes_copied_during_gc());
3133
3134 if (VerifyRememberedSets) {
3135 log_info(gc, verify)("[Verifying RemSets after GC]");
3136 VerifyRegionRemSetClosure v_cl;
3137 heap_region_iterate(&v_cl);
3138 }
3139
3140 _verifier->verify_after_gc(verify_type);
3141 _verifier->check_bitmaps("GC End");
3142
3143 assert(!_ref_processor_stw->discovery_enabled(), "Postcondition");
3144 _ref_processor_stw->verify_no_references_recorded();
3145
3146 // CM reference discovery will be re-enabled if necessary.
3147 }
3148
3149 #ifdef TRACESPINNING
3150 ParallelTaskTerminator::print_termination_counts();
3151 #endif
3152
3153 gc_epilogue(false);
3154 }
3155
3156 // Print the remainder of the GC log output.
3157 if (evacuation_failed()) {
3158 log_info(gc)("To-space exhausted");
3159 }
3160
3161 policy()->print_phases();
3162 heap_transition.print();
3163
3164 // It is not yet safe to tell the concurrent mark thread to
3165 // start as we have some optional output below. We don't want the
3166 // output from the concurrent mark thread interfering with this
3167 // logging output either.
3168
3169 _hrm->verify_optional();
3170 _verifier->verify_region_sets_optional();
3171
3172 TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
3173 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
3174
3175 print_heap_after_gc();
3176 print_heap_regions();
3177 trace_heap_after_gc(_gc_tracer_stw);
3178
3179 // We must call G1MonitoringSupport::update_sizes() in the same scoping level
3180 // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
3181 // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
3182 // before any GC notifications are raised.
3183 g1mm()->update_sizes();
3184
3185 _gc_tracer_stw->report_evacuation_info(&evacuation_info);
3186 _gc_tracer_stw->report_tenuring_threshold(_policy->tenuring_threshold());
3187 _gc_timer_stw->register_gc_end();
3188 _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
3189 }
3190 // It should now be safe to tell the concurrent mark thread to start
3191 // without its logging output interfering with the logging output
3192 // that came from the pause.
3193
3194 if (should_start_conc_mark) {
3195 // CAUTION: after the do_concurrent_mark() call below,
3196 // the concurrent marking thread(s) could be running
3197 // concurrently with us. Make sure that anything after
3198 // this point does not assume that we are the only GC thread
3199 // running. Note: of course, the actual marking work will
3200 // not start until the safepoint itself is released in
3201 // SuspendibleThreadSet::desynchronize().
3202 do_concurrent_mark();
3203 }
3204
3205 return true;
3206 }
3207
3208 void G1CollectedHeap::remove_self_forwarding_pointers() {
3209 G1ParRemoveSelfForwardPtrsTask rsfp_task;
3210 workers()->run_task(&rsfp_task);
3211 }
3212
3213 void G1CollectedHeap::restore_after_evac_failure() {
3214 double remove_self_forwards_start = os::elapsedTime();
3215
3216 remove_self_forwarding_pointers();
3217 SharedRestorePreservedMarksTaskExecutor task_executor(workers());
3218 _preserved_marks_set.restore(&task_executor);
3219
3220 policy()->phase_times()->record_evac_fail_remove_self_forwards((os::elapsedTime() - remove_self_forwards_start) * 1000.0);
3221 }
3222
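// Called when copying an object fails during evacuation: record the failure
// for this worker and preserve the object's original mark word so that it can
// be restored once self-forwarding pointers have been removed.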
3223 void G1CollectedHeap::preserve_mark_during_evac_failure(uint worker_id, oop obj, markOop m) {
3224 if (!_evacuation_failed) {
3225 _evacuation_failed = true;
3226 }
3227
3228 _evacuation_failed_info_array[worker_id].register_copy_failure(obj->size());
3229 _preserved_marks_set.get(worker_id)->push_if_necessary(obj, m);
3230 }
3231
3232 bool G1ParEvacuateFollowersClosure::offer_termination() {
3233 EventGCPhaseParallel event;
3234 G1ParScanThreadState* const pss = par_scan_state();
3235 start_term_time();
3236 const bool res = terminator()->offer_termination();
3237 end_term_time();
3238 event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(G1GCPhaseTimes::Termination));
3239 return res;
3240 }
3258 RefToScanQueueSet* _queues;
3259 G1RootProcessor* _root_processor;
3260 TaskTerminator _terminator;
3261 uint _n_workers;
3262
3263 public:
3264 G1ParTask(G1CollectedHeap* g1h, G1ParScanThreadStateSet* per_thread_states, RefToScanQueueSet *task_queues, G1RootProcessor* root_processor, uint n_workers)
3265 : AbstractGangTask("G1 collection"),
3266 _g1h(g1h),
3267 _pss(per_thread_states),
3268 _queues(task_queues),
3269 _root_processor(root_processor),
3270 _terminator(n_workers, _queues),
3271 _n_workers(n_workers)
3272 {}
3273
3274 void work(uint worker_id) {
3275 if (worker_id >= _n_workers) return; // no work needed this round
3276
3277 double start_sec = os::elapsedTime();
3278 _g1h->policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, start_sec);
3279
3280 {
3281 ResourceMark rm;
3282 HandleMark hm;
3283
3284 ReferenceProcessor* rp = _g1h->ref_processor_stw();
3285
3286 G1ParScanThreadState* pss = _pss->state_for_worker(worker_id);
3287 pss->set_ref_discoverer(rp);
3288
3289 double start_strong_roots_sec = os::elapsedTime();
3290
3291 _root_processor->evacuate_roots(pss, worker_id);
3292
3293 _g1h->rem_set()->oops_into_collection_set_do(pss, worker_id);
3294
3295 double strong_roots_sec = os::elapsedTime() - start_strong_roots_sec;
3296
3297 double term_sec = 0.0;
3298 size_t evac_term_attempts = 0;
3299 {
3300 double start = os::elapsedTime();
3301 G1ParEvacuateFollowersClosure evac(_g1h, pss, _queues, _terminator.terminator(), G1GCPhaseTimes::ObjCopy);
3302 evac.do_void();
3303
3304 evac_term_attempts = evac.term_attempts();
3305 term_sec = evac.term_time();
3306 double elapsed_sec = os::elapsedTime() - start;
3307
3308 G1GCPhaseTimes* p = _g1h->policy()->phase_times();
3309 p->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
3310
3311 p->record_or_add_thread_work_item(G1GCPhaseTimes::ObjCopy,
3312 worker_id,
3313 pss->lab_waste_words() * HeapWordSize,
3314 G1GCPhaseTimes::ObjCopyLABWaste);
3315 p->record_or_add_thread_work_item(G1GCPhaseTimes::ObjCopy,
3316 worker_id,
3317 pss->lab_undo_waste_words() * HeapWordSize,
3318 G1GCPhaseTimes::ObjCopyLABUndoWaste);
3319
3320 p->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
3321 p->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, evac_term_attempts);
3322 }
3323
3324 assert(pss->queue_is_empty(), "should be empty");
3325
3326 // Close the inner scope so that the ResourceMark and HandleMark
3327 // destructors are executed here and are included as part of the
3328 // "GC Worker Time".
3329 }
3330 _g1h->policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime());
3331 }
3332 };
3333
3334 void G1CollectedHeap::complete_cleaning(BoolObjectClosure* is_alive,
3335 bool class_unloading_occurred) {
3336 uint num_workers = workers()->active_workers();
3337 ParallelCleaningTask unlink_task(is_alive, num_workers, class_unloading_occurred, false);
3338 workers()->run_task(&unlink_task);
3339 }
3340
3341 // Clean string dedup data structures.
3342 // Ideally we would prefer to use a StringDedupCleaningTask here, but we want to
3343 // record the durations of the phases. Hence the almost-copy.
3344 class G1StringDedupCleaningTask : public AbstractGangTask {
3345 BoolObjectClosure* _is_alive;
3346 OopClosure* _keep_alive;
3347 G1GCPhaseTimes* _phase_times;
3348
3349 public:
3350 G1StringDedupCleaningTask(BoolObjectClosure* is_alive,
3375 }
3376 }
3377 };
3378
3379 void G1CollectedHeap::string_dedup_cleaning(BoolObjectClosure* is_alive,
3380 OopClosure* keep_alive,
3381 G1GCPhaseTimes* phase_times) {
3382 G1StringDedupCleaningTask cl(is_alive, keep_alive, phase_times);
3383 workers()->run_task(&cl);
3384 }
3385
3386 class G1RedirtyLoggedCardsTask : public AbstractGangTask {
3387 private:
3388 G1DirtyCardQueueSet* _queue;
3389 G1CollectedHeap* _g1h;
3390 public:
3391 G1RedirtyLoggedCardsTask(G1DirtyCardQueueSet* queue, G1CollectedHeap* g1h) : AbstractGangTask("Redirty Cards"),
3392 _queue(queue), _g1h(g1h) { }
3393
3394 virtual void work(uint worker_id) {
3395 G1GCPhaseTimes* phase_times = _g1h->policy()->phase_times();
3396 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::RedirtyCards, worker_id);
3397
3398 RedirtyLoggedCardTableEntryClosure cl(_g1h);
3399 _queue->par_apply_closure_to_all_completed_buffers(&cl);
3400
3401 phase_times->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_dirtied());
3402 }
3403 };
3404
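// Re-dirty the cards logged during this pause and hand the buffers back to
// the global dirty card queue set so that refinement can process them again.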
3405 void G1CollectedHeap::redirty_logged_cards() {
3406 double redirty_logged_cards_start = os::elapsedTime();
3407
3408 G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set(), this);
3409 dirty_card_queue_set().reset_for_par_iteration();
3410 workers()->run_task(&redirty_task);
3411
3412 G1DirtyCardQueueSet& dcq = G1BarrierSet::dirty_card_queue_set();
3413 dcq.merge_bufferlists(&dirty_card_queue_set());
3414 assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
3415
3416 policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
3417 }
3418
3419 // Weak Reference Processing support
3420
3421 bool G1STWIsAliveClosure::do_object_b(oop p) {
3422 // An object is reachable if it is outside the collection set,
3423 // or is inside and copied.
3424 return !_g1h->is_in_cset(p) || p->is_forwarded();
3425 }
3426
3427 bool G1STWSubjectToDiscoveryClosure::do_object_b(oop obj) {
3428 assert(obj != NULL, "must not be NULL");
3429 assert(_g1h->is_in_reserved(obj), "Trying to discover obj " PTR_FORMAT " not in heap", p2i(obj));
3430 // The areas the CM and STW ref processor manage must be disjoint. The is_in_cset() below
3431 // may falsely indicate that this is not the case here: however the collection set only
3432 // contains old regions when concurrent mark is not running.
3433 return _g1h->is_in_cset(obj) || _g1h->heap_region_containing(obj)->is_survivor();
3434 }
3435
3436 // Non Copying Keep Alive closure
3629
3630 // Even when parallel reference processing is enabled, the processing
3631 // of JNI refs is always performed serially by the current thread
3632 // rather than by a worker. The following PSS will be used for processing
3633 // JNI refs.
3634
3635 // Use only a single queue for this PSS.
3636 G1ParScanThreadState* pss = per_thread_states->state_for_worker(0);
3637 pss->set_ref_discoverer(NULL);
3638 assert(pss->queue_is_empty(), "pre-condition");
3639
3640 // Keep alive closure.
3641 G1CopyingKeepAliveClosure keep_alive(this, pss);
3642
3643 // Serial Complete GC closure
3644 G1STWDrainQueueClosure drain_queue(this, pss);
3645
3646 // Set up the soft refs policy...
3647 rp->setup_policy(false);
3648
3649 ReferenceProcessorPhaseTimes* pt = policy()->phase_times()->ref_phase_times();
3650
3651 ReferenceProcessorStats stats;
3652 if (!rp->processing_is_mt()) {
3653 // Serial reference processing...
3654 stats = rp->process_discovered_references(&is_alive,
3655 &keep_alive,
3656 &drain_queue,
3657 NULL,
3658 pt);
3659 } else {
3660 uint no_of_gc_workers = workers()->active_workers();
3661
3662 // Parallel reference processing
3663 assert(no_of_gc_workers <= rp->max_num_queues(),
3664 "Mismatch between the number of GC workers %u and the maximum number of Reference process queues %u",
3665 no_of_gc_workers, rp->max_num_queues());
3666
3667 G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues);
3668 stats = rp->process_discovered_references(&is_alive,
3669 &keep_alive,
3670 &drain_queue,
3671 &par_task_executor,
3672 pt);
3673 }
3674
3675 _gc_tracer_stw->report_gc_reference_stats(stats);
3676
3677 // We have completed copying any necessary live referent objects.
3678 assert(pss->queue_is_empty(), "both queue and overflow should be empty");
3679
3680 make_pending_list_reachable();
3681
3682 rp->verify_no_references_recorded();
3683
3684 double ref_proc_time = os::elapsedTime() - ref_proc_start;
3685 policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
3686 }
3687
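// During an initial-mark pause, mark the head of the reference pending list in
// the next bitmap so that the list is kept live by the upcoming concurrent cycle.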
3688 void G1CollectedHeap::make_pending_list_reachable() {
3689 if (collector_state()->in_initial_mark_gc()) {
3690 oop pll_head = Universe::reference_pending_list();
3691 if (pll_head != NULL) {
3692 // Any valid worker id is fine here as we are in the VM thread and single-threaded.
3693 _cm->mark_in_next_bitmap(0 /* worker_id */, pll_head);
3694 }
3695 }
3696 }
3697
3698 void G1CollectedHeap::merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states) {
3699 double merge_pss_time_start = os::elapsedTime();
3700 per_thread_states->flush();
3701 policy()->phase_times()->record_merge_pss_time_ms((os::elapsedTime() - merge_pss_time_start) * 1000.0);
3702 }
3703
3704 void G1CollectedHeap::pre_evacuate_collection_set() {
3705 _expand_heap_after_alloc_failure = true;
3706 _evacuation_failed = false;
3707
3708 // Disable the hot card cache.
3709 _hot_card_cache->reset_hot_cache_claimed_index();
3710 _hot_card_cache->set_use_cache(false);
3711
3712 rem_set()->prepare_for_oops_into_collection_set_do();
3713 _preserved_marks_set.assert_empty();
3714
3715 G1GCPhaseTimes* phase_times = policy()->phase_times();
3716
3717 // InitialMark needs claim bits to keep track of the marked-through CLDs.
3718 if (collector_state()->in_initial_mark_gc()) {
3719 double start_clear_claimed_marks = os::elapsedTime();
3720
3721 ClassLoaderDataGraph::clear_claimed_marks();
3722
3723 double recorded_clear_claimed_marks_time_ms = (os::elapsedTime() - start_clear_claimed_marks) * 1000.0;
3724 phase_times->record_clear_claimed_marks_time_ms(recorded_clear_claimed_marks_time_ms);
3725 }
3726 }
3727
3728 void G1CollectedHeap::evacuate_collection_set(G1ParScanThreadStateSet* per_thread_states) {
3729 // Should G1EvacuationFailureALot be in effect for this GC?
3730 NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
3731
3732 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
3733
3734 G1GCPhaseTimes* phase_times = policy()->phase_times();
3735
3736 double start_par_time_sec = os::elapsedTime();
3737 double end_par_time_sec;
3738
3739 {
3740 const uint n_workers = workers()->active_workers();
3741 G1RootProcessor root_processor(this, n_workers);
3742 G1ParTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, n_workers);
3743
3744 workers()->run_task(&g1_par_task);
3745 end_par_time_sec = os::elapsedTime();
3746
3747 // Closing the inner scope will execute the destructor
3748 // for the G1RootProcessor object. We record the current
3749 // elapsed time before closing the scope so that time
3750 // taken for the destructor is NOT included in the
3751 // reported parallel time.
3752 }
3753
3754 double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
3773 }
3774
3775 void scan_roots(G1ParScanThreadState* pss, uint worker_id) {
3776 G1EvacuationRootClosures* root_cls = pss->closures();
3777 G1ScanObjsDuringScanRSClosure obj_cl(_g1h, pss);
3778
3779 size_t scanned = 0;
3780 size_t claimed = 0;
3781 size_t skipped = 0;
3782 size_t used_memory = 0;
3783
3784 Ticks start = Ticks::now();
3785 Tickspan copy_time;
3786
3787 for (uint i = _optional->current_index(); i < _optional->current_limit(); i++) {
3788 HeapRegion* hr = _optional->region_at(i);
3789 G1ScanRSForOptionalClosure scan_opt_cl(&obj_cl);
3790 pss->oops_into_optional_region(hr)->oops_do(&scan_opt_cl, root_cls->raw_strong_oops());
3791 copy_time += trim_ticks(pss);
3792
3793 G1ScanRSForRegionClosure scan_rs_cl(_g1h->rem_set()->scan_state(), &obj_cl, pss, G1GCPhaseTimes::OptScanRS, worker_id);
3794 scan_rs_cl.do_heap_region(hr);
3795 copy_time += trim_ticks(pss);
3796 scanned += scan_rs_cl.cards_scanned();
3797 claimed += scan_rs_cl.cards_claimed();
3798 skipped += scan_rs_cl.cards_skipped();
3799
3800 // Chunk lists for this region are no longer needed.
3801 used_memory += pss->oops_into_optional_region(hr)->used_memory();
3802 }
3803
3804 Tickspan scan_time = (Ticks::now() - start) - copy_time;
3805 G1GCPhaseTimes* p = _g1h->policy()->phase_times();
3806 p->record_or_add_time_secs(G1GCPhaseTimes::OptScanRS, worker_id, scan_time.seconds());
3807 p->record_or_add_time_secs(G1GCPhaseTimes::OptObjCopy, worker_id, copy_time.seconds());
3808
3809 p->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_id, scanned, G1GCPhaseTimes::OptCSetScannedCards);
3810 p->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_id, claimed, G1GCPhaseTimes::OptCSetClaimedCards);
3811 p->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_id, skipped, G1GCPhaseTimes::OptCSetSkippedCards);
3812 p->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_id, used_memory, G1GCPhaseTimes::OptCSetUsedMemory);
3813 }
3814
3815 void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
3816 Ticks start = Ticks::now();
3817 G1ParEvacuateFollowersClosure cl(_g1h, pss, _queues, &_terminator, G1GCPhaseTimes::OptObjCopy);
3818 cl.do_void();
3819
3820 Tickspan evac_time = (Ticks::now() - start);
3821 G1GCPhaseTimes* p = _g1h->policy()->phase_times();
3822 p->record_or_add_time_secs(G1GCPhaseTimes::OptObjCopy, worker_id, evac_time.seconds());
3823 assert(pss->trim_ticks().seconds() == 0.0, "Unexpected partial trimming done during optional evacuation");
3824 }
3825
3826 public:
3827 G1EvacuateOptionalRegionTask(G1CollectedHeap* g1h,
3828 G1ParScanThreadStateSet* per_thread_states,
3829 G1OptionalCSet* cset,
3830 RefToScanQueueSet* queues,
3831 uint n_workers) :
3832 AbstractGangTask("G1 Evacuation Optional Region Task"),
3833 _g1h(g1h),
3834 _per_thread_states(per_thread_states),
3835 _optional(cset),
3836 _queues(queues),
3837 _terminator(n_workers, _queues) {
3838 }
3839
3840 void work(uint worker_id) {
3841 ResourceMark rm;
3850 };
3851
3852 void G1CollectedHeap::evacuate_optional_regions(G1ParScanThreadStateSet* per_thread_states, G1OptionalCSet* ocset) {
3853 class G1MarkScope : public MarkScope {};
3854 G1MarkScope code_mark_scope;
3855
3856 G1EvacuateOptionalRegionTask task(this, per_thread_states, ocset, _task_queues, workers()->active_workers());
3857 workers()->run_task(&task);
3858 }
3859
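// Evacuate the optional collection set in increments: each iteration prepares
// as many optional regions as the remaining pause time budget allows, and we
// stop early on evacuation failure or once the budget is exhausted.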
3860 void G1CollectedHeap::evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states) {
3861 G1OptionalCSet optional_cset(&_collection_set, per_thread_states);
3862 if (optional_cset.is_empty()) {
3863 return;
3864 }
3865
3866 if (evacuation_failed()) {
3867 return;
3868 }
3869
3870 G1GCPhaseTimes* phase_times = policy()->phase_times();
3871 const double gc_start_time_ms = phase_times->cur_collection_start_sec() * 1000.0;
3872
3873 double start_time_sec = os::elapsedTime();
3874
3875 do {
3876 double time_used_ms = os::elapsedTime() * 1000.0 - gc_start_time_ms;
3877 double time_left_ms = MaxGCPauseMillis - time_used_ms;
3878
3879 if (time_left_ms < 0) {
3880 log_trace(gc, ergo, cset)("Skipping %u optional regions, pause time exceeded %.3fms", optional_cset.size(), time_used_ms);
3881 break;
3882 }
3883
3884 optional_cset.prepare_evacuation(time_left_ms * _policy->optional_evacuation_fraction());
3885 if (optional_cset.prepare_failed()) {
3886 log_trace(gc, ergo, cset)("Skipping %u optional regions, no regions can be evacuated in %.3fms", optional_cset.size(), time_left_ms);
3887 break;
3888 }
3889
3890 evacuate_optional_regions(per_thread_states, &optional_cset);
3891
3892 optional_cset.complete_evacuation();
3893 if (optional_cset.evacuation_failed()) {
3894 break;
3895 }
3896 } while (!optional_cset.is_empty());
3897
3898 phase_times->record_optional_evacuation((os::elapsedTime() - start_time_sec) * 1000.0);
3899 }
3900
3901 void G1CollectedHeap::post_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
3902 // Also cleans the card table from temporary duplicate detection information used
3903 // during UpdateRS/ScanRS.
3904 rem_set()->cleanup_after_oops_into_collection_set_do();
3905
3906 // Process any discovered reference objects - we have
3907 // to do this _before_ we retire the GC alloc regions
3908 // as we may have to copy some 'reachable' referent
3909 // objects (and their reachable sub-graphs) that were
3910 // not copied during the pause.
3911 process_discovered_references(per_thread_states);
3912
3913 G1STWIsAliveClosure is_alive(this);
3914 G1KeepAliveClosure keep_alive(this);
3915
3916 WeakProcessor::weak_oops_do(workers(), &is_alive, &keep_alive,
3917 policy()->phase_times()->weak_phase_times());
3918
3919 if (G1StringDedup::is_enabled()) {
3920 double string_dedup_time_ms = os::elapsedTime();
3921
3922 string_dedup_cleaning(&is_alive, &keep_alive, policy()->phase_times());
3923
3924 double string_cleanup_time_ms = (os::elapsedTime() - string_dedup_time_ms) * 1000.0;
3925 policy()->phase_times()->record_string_deduplication_time(string_cleanup_time_ms);
3926 }
3927
3928 if (evacuation_failed()) {
3929 restore_after_evac_failure();
3930
3931 // Reset the G1EvacuationFailureALot counters and flags
3932 // Note: the values are reset only when an actual
3933 // evacuation failure occurs.
3934 NOT_PRODUCT(reset_evacuation_should_fail();)
3935 }
3936
3937 _preserved_marks_set.assert_empty();
3938
3939 _allocator->release_gc_alloc_regions(evacuation_info);
3940
3941 merge_per_thread_state_info(per_thread_states);
3942
3943 // Reset and re-enable the hot card cache.
3944 // Note the counts for the cards in the regions in the
3945 // collection set are reset when the collection set is freed.
3946 _hot_card_cache->reset_hot_cache();
3947 _hot_card_cache->set_use_cache(true);
3948
3949 purge_code_root_memory();
3950
3951 redirty_logged_cards();
3952 #if COMPILER2_OR_JVMCI
3953 double start = os::elapsedTime();
3954 DerivedPointerTable::update_pointers();
3955 policy()->phase_times()->record_derived_pointer_table_update_time((os::elapsedTime() - start) * 1000.0);
3956 #endif
3957 policy()->print_age_table();
3958 }
3959
3960 void G1CollectedHeap::record_obj_copy_mem_stats() {
3961 policy()->add_bytes_allocated_in_old_since_last_gc(_old_evac_stats.allocated() * HeapWordSize);
3962
3963 _gc_tracer_stw->report_evacuation_statistics(create_g1_evac_summary(&_survivor_evac_stats),
3964 create_g1_evac_summary(&_old_evac_stats));
3965 }
3966
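// Return a single region to the given free list: clear its metadata (and,
// unless explicitly skipped, its remembered set and hot card cache counts)
// before adding it to the list in address order.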
3967 void G1CollectedHeap::free_region(HeapRegion* hr,
3968 FreeRegionList* free_list,
3969 bool skip_remset,
3970 bool skip_hot_card_cache,
3971 bool locked) {
3972 assert(!hr->is_free(), "the region should not be free");
3973 assert(!hr->is_empty(), "the region should not be empty");
3974 assert(_hrm->is_available(hr->hrm_index()), "region should be committed");
3975 assert(free_list != NULL, "pre-condition");
3976
3977 if (G1VerifyBitmaps) {
3978 MemRegion mr(hr->bottom(), hr->end());
3979 concurrent_mark()->clear_range_in_prev_bitmap(mr);
3980 }
3981
3982 // Clear the card counts for this region.
3983 // Note: we only need to do this if the region is not young
3984 // (since we don't refine cards in young regions).
3985 if (!skip_hot_card_cache && !hr->is_young()) {
3986 _hot_card_cache->reset_card_counts(hr);
3987 }
3988 hr->hr_clear(skip_remset, true /* clear_space */, locked /* locked */);
3989 _policy->remset_tracker()->update_at_free(hr);
3990 free_list->add_ordered(hr);
3991 }
3992
3993 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
3994 FreeRegionList* free_list) {
3995 assert(hr->is_humongous(), "this is only for humongous regions");
3996 assert(free_list != NULL, "pre-condition");
3997 hr->clear_humongous();
3998 free_region(hr, free_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
3999 }
4000
4001 void G1CollectedHeap::remove_from_old_sets(const uint old_regions_removed,
4002 const uint humongous_regions_removed) {
4003 if (old_regions_removed > 0 || humongous_regions_removed > 0) {
4004 MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
4005 _old_set.bulk_remove(old_regions_removed);
4006 _humongous_set.bulk_remove(humongous_regions_removed);
4007 }
4008
4009 }
4099 size_t used_words = r->marked_bytes() / HeapWordSize;
4100
4101 _failure_used_words += used_words;
4102 _failure_waste_words += HeapRegion::GrainWords - used_words;
4103
4104 g1h->old_set_add(r);
4105 _after_used_bytes += r->used();
4106 }
4107 return false;
4108 }
4109
4110 void complete_work() {
4111 G1CollectedHeap* g1h = G1CollectedHeap::heap();
4112
4113 _evacuation_info->set_regions_freed(_local_free_list.length());
4114 _evacuation_info->increment_collectionset_used_after(_after_used_bytes);
4115
4116 g1h->prepend_to_freelist(&_local_free_list);
4117 g1h->decrement_summary_bytes(_before_used_bytes);
4118
4119 G1Policy* policy = g1h->policy();
4120 policy->add_bytes_allocated_in_old_since_last_gc(_bytes_allocated_in_old_since_last_gc);
4121
4122 g1h->alloc_buffer_stats(InCSetState::Old)->add_failure_used_and_waste(_failure_used_words, _failure_waste_words);
4123 }
4124 };
4125
4126 G1CollectionSet* _collection_set;
4127 G1SerialFreeCollectionSetClosure _cl;
4128 const size_t* _surviving_young_words;
4129
4130 size_t _rs_lengths;
4131
4132 volatile jint _serial_work_claim;
4133
4134 struct WorkItem {
4135 uint region_idx;
4136 bool is_young;
4137 bool evacuation_failed;
4138
4139 WorkItem(HeapRegion* r) {
4174 private:
4175 size_t _cur_idx;
4176 WorkItem* _work_items;
4177 public:
4178 G1PrepareFreeCollectionSetClosure(WorkItem* work_items) : HeapRegionClosure(), _cur_idx(0), _work_items(work_items) { }
4179
4180 virtual bool do_heap_region(HeapRegion* r) {
4181 _work_items[_cur_idx++] = WorkItem(r);
4182 return false;
4183 }
4184 };
4185
4186 void prepare_work() {
4187 G1PrepareFreeCollectionSetClosure cl(_work_items);
4188 _collection_set->iterate(&cl);
4189 }
4190
4191 void complete_work() {
4192 _cl.complete_work();
4193
4194 G1Policy* policy = G1CollectedHeap::heap()->policy();
4195 policy->record_max_rs_lengths(_rs_lengths);
4196 policy->cset_regions_freed();
4197 }
4198 public:
4199 G1FreeCollectionSetTask(G1CollectionSet* collection_set, G1EvacuationInfo* evacuation_info, const size_t* surviving_young_words) :
4200 AbstractGangTask("G1 Free Collection Set"),
4201 _collection_set(collection_set),
4202 _cl(evacuation_info, surviving_young_words),
4203 _surviving_young_words(surviving_young_words),
4204 _rs_lengths(0),
4205 _serial_work_claim(0),
4206 _parallel_work_claim(0),
4207 _num_work_items(collection_set->region_length()),
4208 _work_items(NEW_C_HEAP_ARRAY(WorkItem, _num_work_items, mtGC)) {
4209 prepare_work();
4210 }
4211
4212 ~G1FreeCollectionSetTask() {
4213 complete_work();
4214 FREE_C_HEAP_ARRAY(WorkItem, _work_items);
4215 }
4216
4217 // Chunk size for work distribution. The chosen value has been determined experimentally
4218 // to be a good tradeoff between overhead and achievable parallelism.
4219 static uint chunk_size() { return 32; }
4220
4221 virtual void work(uint worker_id) {
4222 G1GCPhaseTimes* timer = G1CollectedHeap::heap()->policy()->phase_times();
4223
4224 // Claim serial work.
4225 if (_serial_work_claim == 0) {
4226 jint value = Atomic::add(1, &_serial_work_claim) - 1;
4227 if (value == 0) {
4228 double serial_time = os::elapsedTime();
4229 do_serial_work();
4230 timer->record_serial_free_cset_time_ms((os::elapsedTime() - serial_time) * 1000.0);
4231 }
4232 }
4233
4234 // Start parallel work.
4235 double young_time = 0.0;
4236 bool has_young_time = false;
4237 double non_young_time = 0.0;
4238 bool has_non_young_time = false;
4239
4240 while (true) {
4241 size_t end = Atomic::add(chunk_size(), &_parallel_work_claim);
4242 size_t cur = end - chunk_size();
4279 }
4280 };
4281
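// Free the regions of the just-evacuated collection set, sizing the work gang
// by the number of fixed-size chunks the region list is split into.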
4282 void G1CollectedHeap::free_collection_set(G1CollectionSet* collection_set, G1EvacuationInfo& evacuation_info, const size_t* surviving_young_words) {
4283 _eden.clear();
4284
4285 double free_cset_start_time = os::elapsedTime();
4286
4287 {
4288 uint const num_chunks = MAX2(_collection_set.region_length() / G1FreeCollectionSetTask::chunk_size(), 1U);
4289 uint const num_workers = MIN2(workers()->active_workers(), num_chunks);
4290
4291 G1FreeCollectionSetTask cl(collection_set, &evacuation_info, surviving_young_words);
4292
4293 log_debug(gc, ergo)("Running %s using %u workers for collection set length %u",
4294 cl.name(),
4295 num_workers,
4296 _collection_set.region_length());
4297 workers()->run_task(&cl, num_workers);
4298 }
4299 policy()->phase_times()->record_total_free_cset_time_ms((os::elapsedTime() - free_cset_start_time) * 1000.0);
4300
4301 collection_set->clear();
4302 }
4303
4304 class G1FreeHumongousRegionClosure : public HeapRegionClosure {
4305 private:
4306 FreeRegionList* _free_region_list;
4307 HeapRegionSet* _proxy_set;
4308 uint _humongous_objects_reclaimed;
4309 uint _humongous_regions_reclaimed;
4310 size_t _freed_bytes;
4311 public:
4312
4313 G1FreeHumongousRegionClosure(FreeRegionList* free_region_list) :
4314 _free_region_list(free_region_list), _proxy_set(NULL), _humongous_objects_reclaimed(0), _humongous_regions_reclaimed(0), _freed_bytes(0) {
4315 }
4316
4317 virtual bool do_heap_region(HeapRegion* r) {
4318 if (!r->is_starts_humongous()) {
4319 return false;
4404 }
4405
4406 uint humongous_objects_reclaimed() {
4407 return _humongous_objects_reclaimed;
4408 }
4409
4410 uint humongous_regions_reclaimed() {
4411 return _humongous_regions_reclaimed;
4412 }
4413
4414 size_t bytes_freed() const {
4415 return _freed_bytes;
4416 }
4417 };
4418
4419 void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
4420 assert_at_safepoint_on_vm_thread();
4421
4422 if (!G1EagerReclaimHumongousObjects ||
4423 (!_has_humongous_reclaim_candidates && !log_is_enabled(Debug, gc, humongous))) {
4424 policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);
4425 return;
4426 }
4427
4428 double start_time = os::elapsedTime();
4429
4430 FreeRegionList local_cleanup_list("Local Humongous Cleanup List");
4431
4432 G1FreeHumongousRegionClosure cl(&local_cleanup_list);
4433 heap_region_iterate(&cl);
4434
4435 remove_from_old_sets(0, cl.humongous_regions_reclaimed());
4436
4437 G1HRPrinter* hrp = hr_printer();
4438 if (hrp->is_active()) {
4439 FreeRegionListIterator iter(&local_cleanup_list);
4440 while (iter.more_available()) {
4441 HeapRegion* hr = iter.get_next();
4442 hrp->cleanup(hr);
4443 }
4444 }
4445
4446 prepend_to_freelist(&local_cleanup_list);
4447 decrement_summary_bytes(cl.bytes_freed());
4448
4449 policy()->phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0,
4450 cl.humongous_objects_reclaimed());
4451 }
4452
4453 class G1AbandonCollectionSetClosure : public HeapRegionClosure {
4454 public:
4455 virtual bool do_heap_region(HeapRegion* r) {
4456 assert(r->in_collection_set(), "Region %u must have been in collection set", r->hrm_index());
4457 G1CollectedHeap::heap()->clear_in_cset(r);
4458 r->set_young_index_in_cset(-1);
4459 return false;
4460 }
4461 };
4462
4463 void G1CollectedHeap::abandon_collection_set(G1CollectionSet* collection_set) {
4464 G1AbandonCollectionSetClosure cl;
4465 collection_set->iterate(&cl);
4466
4467 collection_set->clear();
4468 collection_set->stop_incremental_building();
4469 }
4470
4471 bool G1CollectedHeap::is_old_gc_alloc_region(HeapRegion* hr) {
4472 return _allocator->is_retained_old_region(hr);
4473 }
4474
4475 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
4476 _eden.add(hr);
4477 _policy->set_region_eden(hr);
4478 }
4479
4480 #ifdef ASSERT
4481
4482 class NoYoungRegionsClosure: public HeapRegionClosure {
4483 private:
4484 bool _success;
4485 public:
4486 NoYoungRegionsClosure() : _success(true) { }
4487 bool do_heap_region(HeapRegion* r) {
4488 if (r->is_young()) {
4489 log_error(gc, verify)("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
4490 p2i(r->bottom()), p2i(r->end()));
4491 _success = false;
4492 }
4493 return false;
4494 }
4495 bool success() { return _success; }
4496 };
4497
4627 set_used(cl.total_used());
4628 if (_archive_allocator != NULL) {
4629 _archive_allocator->clear_used();
4630 }
4631 }
4632 assert(used() == recalculate_used(),
4633 "inconsistent used(), value: " SIZE_FORMAT " recalculated: " SIZE_FORMAT,
4634 used(), recalculate_used());
4635 }
4636
4637 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
4638 HeapRegion* hr = heap_region_containing(p);
4639 return hr->is_in(p);
4640 }
4641
4642 // Methods for the mutator alloc region
4643
4644 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
4645 bool force) {
4646 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
4647 bool should_allocate = policy()->should_allocate_mutator_region();
4648 if (force || should_allocate) {
4649 HeapRegion* new_alloc_region = new_region(word_size,
4650 HeapRegionType::Eden,
4651 false /* do_expand */);
4652 if (new_alloc_region != NULL) {
4653 set_region_short_lived_locked(new_alloc_region);
4654 _hr_printer.alloc(new_alloc_region, !should_allocate);
4655 _verifier->check_bitmaps("Mutator Region Allocation", new_alloc_region);
4656 _policy->remset_tracker()->update_at_allocate(new_alloc_region);
4657 return new_alloc_region;
4658 }
4659 }
4660 return NULL;
4661 }
4662
4663 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
4664 size_t allocated_bytes) {
4665 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
4666 assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");
4667
4668 collection_set()->add_eden_region(alloc_region);
4669 increase_used(allocated_bytes);
4670 _hr_printer.retire(alloc_region);
4671 // We update the eden sizes here, when the region is retired,
4672 // instead of when it's allocated, since this is the point at which its
4673 // used space has been recorded in _summary_bytes_used.
4674 g1mm()->update_eden_size();
4675 }
4676
4677 // Methods for the GC alloc regions
4678
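// Old GC alloc regions may always be requested; survivor GC alloc regions are
// capped by the policy's maximum number of survivor regions.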
4679 bool G1CollectedHeap::has_more_regions(InCSetState dest) {
4680 if (dest.is_old()) {
4681 return true;
4682 } else {
4683 return survivor_regions_count() < policy()->max_survivor_regions();
4684 }
4685 }
4686
4687 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, InCSetState dest) {
4688 assert(FreeList_lock->owned_by_self(), "pre-condition");
4689
4690 if (!has_more_regions(dest)) {
4691 return NULL;
4692 }
4693
4694 HeapRegionType type;
4695 if (dest.is_young()) {
4696 type = HeapRegionType::Survivor;
4697 } else {
4698 type = HeapRegionType::Old;
4699 }
4700
4701 HeapRegion* new_alloc_region = new_region(word_size,
4702 type,
4703 true /* do_expand */);
4704
4705 if (new_alloc_region != NULL) {
4706 if (type.is_survivor()) {
4707 new_alloc_region->set_survivor();
4708 _survivor.add(new_alloc_region);
4709 _verifier->check_bitmaps("Survivor Region Allocation", new_alloc_region);
4710 } else {
4711 new_alloc_region->set_old();
4712 _verifier->check_bitmaps("Old Region Allocation", new_alloc_region);
4713 }
4714 _policy->remset_tracker()->update_at_allocate(new_alloc_region);
4715 _hr_printer.alloc(new_alloc_region);
4716 return new_alloc_region;
4717 }
4718 return NULL;
4719 }
4720
4721 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
4722 size_t allocated_bytes,
4723 InCSetState dest) {
4724 policy()->record_bytes_copied_during_gc(allocated_bytes);
4725 if (dest.is_old()) {
4726 old_set_add(alloc_region);
4727 }
4728
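// Regions allocated into during an initial-mark pause become root regions;
// concurrent marking must finish scanning them before the next evacuation
// pause can safely move their objects.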
4729 bool const during_im = collector_state()->in_initial_mark_gc();
4730 if (during_im && allocated_bytes > 0) {
4731 _cm->root_regions()->add(alloc_region);
4732 }
4733 _hr_printer.retire(alloc_region);
4734 }
4735
4736 HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
4737 bool expanded = false;
4738 uint index = _hrm->find_highest_free(&expanded);
4739
4740 if (index != G1_NO_HRM_INDEX) {
4741 if (expanded) {
4742 log_debug(gc, ergo, heap)("Attempt heap expansion (requested address range outside heap bounds). region size: " SIZE_FORMAT "B",
4743 HeapRegion::GrainWords * HeapWordSize);
4744 }
4809 HeapRegion* hr = heap_region_containing(obj);
4810 return !hr->is_pinned();
4811 }
4812
4813 void G1CollectedHeap::register_nmethod(nmethod* nm) {
4814 guarantee(nm != NULL, "sanity");
4815 RegisterNMethodOopClosure reg_cl(this, nm);
4816 nm->oops_do(&reg_cl);
4817 }
4818
4819 void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
4820 guarantee(nm != NULL, "sanity");
4821 UnregisterNMethodOopClosure reg_cl(this, nm);
4822 nm->oops_do(&reg_cl, true);
4823 }
4824
4825 void G1CollectedHeap::purge_code_root_memory() {
4826 double purge_start = os::elapsedTime();
4827 G1CodeRootSet::purge();
4828 double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0;
4829 policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms);
4830 }
4831
4832 class RebuildStrongCodeRootClosure: public CodeBlobClosure {
4833 G1CollectedHeap* _g1h;
4834
4835 public:
4836 RebuildStrongCodeRootClosure(G1CollectedHeap* g1h) :
4837 _g1h(g1h) {}
4838
4839 void do_code_blob(CodeBlob* cb) {
4840 nmethod* nm = (cb != NULL) ? cb->as_nmethod_or_null() : NULL;
4841 if (nm == NULL) {
4842 return;
4843 }
4844
4845 if (ScavengeRootsInCode) {
4846 _g1h->register_nmethod(nm);
4847 }
4848 }
4849 };