149
150 void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
151 // The from card cache is not the memory that is actually committed. So we cannot
152 // take advantage of the zero_filled parameter.
153 reset_from_card_cache(start_idx, num_regions);
154 }
155
156
157 HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
158 MemRegion mr) {
159 return new HeapRegion(hrs_index, bot(), mr);
160 }
161
162 // Private methods.
163
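// Allocate a single region from the free list, expanding the committed heap
// first if the list is empty and do_expand is true.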
164 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
165 assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
166 "the only time we use this to allocate a humongous region is "
167 "when we are allocating a single humongous region");
168
169 HeapRegion* res = _hrm.allocate_free_region(is_old);
170
171 if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
172 // Currently, only attempts to allocate GC alloc regions set
173 // do_expand to true. So, we should only reach here during a
174 // safepoint. If this assumption changes we might have to
175 // reconsider the use of _expand_heap_after_alloc_failure.
176 assert(SafepointSynchronize::is_at_safepoint(), "invariant");
177
178 log_debug(gc, ergo, heap)("Attempt heap expansion (region allocation request failed). Allocation request: " SIZE_FORMAT "B",
179 word_size * HeapWordSize);
180
181 if (expand(word_size * HeapWordSize)) {
182 // Given that expand() succeeded in expanding the heap, and we
183 // always expand the heap by an amount aligned to the heap
184 // region size, the free list should in theory not be empty.
185 // In either case allocate_free_region() will check for NULL.
186 res = _hrm.allocate_free_region(is_old);
187 } else {
188 _expand_heap_after_alloc_failure = false;
189 }
190 }
191 return res;
192 }
193
194 HeapWord*
195 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
196 uint num_regions,
197 size_t word_size) {
198 assert(first != G1_NO_HRM_INDEX, "pre-condition");
199 assert(is_humongous(word_size), "word_size should be humongous");
200 assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
201
202 // Index of last region in the series.
203 uint last = first + num_regions - 1;
204
205 // We need to initialize the region(s) we just discovered. This is
206 // a bit tricky given that it can happen concurrently with
320 // Otherwise, if using expansion regions might help, try with expansion regions given back.
321 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
322 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
323
324 _verifier->verify_region_sets_optional();
325
326 uint first = G1_NO_HRM_INDEX;
327 uint obj_regions = (uint) humongous_obj_size_in_regions(word_size);
328
329 if (obj_regions == 1) {
330 // Only one region to allocate, try to use a fast path by directly allocating
331 // from the free lists. Do not try to expand here, we will potentially do that
332 // later.
333 HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
334 if (hr != NULL) {
335 first = hr->hrm_index();
336 }
337 } else {
338 // Policy: Try only empty regions (i.e. already committed first). Maybe we
339 // are lucky enough to find some.
340 first = _hrm.find_contiguous_only_empty(obj_regions);
341 if (first != G1_NO_HRM_INDEX) {
342 _hrm.allocate_free_regions_starting_at(first, obj_regions);
343 }
344 }
345
346 if (first == G1_NO_HRM_INDEX) {
347 // Policy: We could not find enough regions for the humongous object in the
348 // free list. Look through the heap to find a mix of free and uncommitted regions.
349     // If such regions are found, try expansion.
350 first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
351 if (first != G1_NO_HRM_INDEX) {
352 // We found something. Make sure these regions are committed, i.e. expand
353 // the heap. Alternatively we could do a defragmentation GC.
354 log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B",
355 word_size * HeapWordSize);
356
357 _hrm.expand_at(first, obj_regions, workers());
358 g1_policy()->record_new_heap_size(num_regions());
359
360 #ifdef ASSERT
361 for (uint i = first; i < first + obj_regions; ++i) {
362 HeapRegion* hr = region_at(i);
363 assert(hr->is_free(), "sanity");
364 assert(hr->is_empty(), "sanity");
365 assert(is_on_master_free_list(hr), "sanity");
366 }
367 #endif
368 _hrm.allocate_free_regions_starting_at(first, obj_regions);
369 } else {
370 // Policy: Potentially trigger a defragmentation GC.
371 }
372 }
373
374 HeapWord* result = NULL;
375 if (first != G1_NO_HRM_INDEX) {
376 result = humongous_obj_allocate_initialize_regions(first, obj_regions, word_size);
377 assert(result != NULL, "it should always return a valid result");
378
379 // A successful humongous object allocation changes the used space
380 // information of the old generation so we need to recalculate the
381 // sizes and update the jstat counters here.
382 g1mm()->update_sizes();
383 }
384
385 _verifier->verify_region_sets_optional();
386
387 return result;
388 }
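// Illustrative sizing (assuming the 1 MB minimum region size on a 64-bit VM,
// i.e. GrainWords == 131072): a 3 MB humongous allocation request gives
// obj_regions == 3, so the code above needs three contiguous regions, either
// found already committed in the free list or committed via expand_at().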
537 return NULL;
538 }
539 return _archive_allocator->archive_mem_allocate(word_size);
540 }
541
542 void G1CollectedHeap::end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
543 size_t end_alignment_in_bytes) {
544 assert_at_safepoint_on_vm_thread();
545 assert(_archive_allocator != NULL, "_archive_allocator not initialized");
546
547 // Call complete_archive to do the real work, filling in the MemRegion
548 // array with the archive regions.
549 _archive_allocator->complete_archive(ranges, end_alignment_in_bytes);
550 delete _archive_allocator;
551 _archive_allocator = NULL;
552 }
553
554 bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, size_t count) {
555 assert(ranges != NULL, "MemRegion array NULL");
556 assert(count != 0, "No MemRegions provided");
557 MemRegion reserved = _hrm.reserved();
558 for (size_t i = 0; i < count; i++) {
559 if (!reserved.contains(ranges[i].start()) || !reserved.contains(ranges[i].last())) {
560 return false;
561 }
562 }
563 return true;
564 }
565
566 bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges,
567 size_t count,
568 bool open) {
569 assert(!is_init_completed(), "Expect to be called at JVM init time");
570 assert(ranges != NULL, "MemRegion array NULL");
571 assert(count != 0, "No MemRegions provided");
572 MutexLockerEx x(Heap_lock);
573
574 MemRegion reserved = _hrm.reserved();
575 HeapWord* prev_last_addr = NULL;
576 HeapRegion* prev_last_region = NULL;
577
578 // Temporarily disable pretouching of heap pages. This interface is used
579 // when mmap'ing archived heap data in, so pre-touching is wasted.
580 FlagSetting fs(AlwaysPreTouch, false);
581
582 // Enable archive object checking used by G1MarkSweep. We have to let it know
583 // about each archive range, so that objects in those ranges aren't marked.
584 G1ArchiveAllocator::enable_archive_object_check();
585
586 // For each specified MemRegion range, allocate the corresponding G1
587 // regions and mark them as archive regions. We expect the ranges
588 // in ascending starting address order, without overlap.
589 for (size_t i = 0; i < count; i++) {
590 MemRegion curr_range = ranges[i];
591 HeapWord* start_address = curr_range.start();
592 size_t word_size = curr_range.word_size();
593 HeapWord* last_address = curr_range.last();
594 size_t commits = 0;
595
596 guarantee(reserved.contains(start_address) && reserved.contains(last_address),
597 "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
598 p2i(start_address), p2i(last_address));
599 guarantee(start_address > prev_last_addr,
600 "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
601 p2i(start_address), p2i(prev_last_addr));
602 prev_last_addr = last_address;
603
604 // Check for ranges that start in the same G1 region in which the previous
605 // range ended, and adjust the start address so we don't try to allocate
606 // the same region again. If the current range is entirely within that
607 // region, skip it, just adjusting the recorded top.
608 HeapRegion* start_region = _hrm.addr_to_region(start_address);
609 if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
610 start_address = start_region->end();
611 if (start_address > last_address) {
612 increase_used(word_size * HeapWordSize);
613 start_region->set_top(last_address + 1);
614 continue;
615 }
616 start_region->set_top(start_address);
617 curr_range = MemRegion(start_address, last_address + 1);
618 start_region = _hrm.addr_to_region(start_address);
619 }
620
621 // Perform the actual region allocation, exiting if it fails.
622 // Then note how much new space we have allocated.
623 if (!_hrm.allocate_containing_regions(curr_range, &commits, workers())) {
624 return false;
625 }
626 increase_used(word_size * HeapWordSize);
627 if (commits != 0) {
628 log_debug(gc, ergo, heap)("Attempt heap expansion (allocate archive regions). Total size: " SIZE_FORMAT "B",
629 HeapRegion::GrainWords * HeapWordSize * commits);
630
631 }
632
633 // Mark each G1 region touched by the range as archive, add it to
634     // the archive set, and set top.
635 HeapRegion* curr_region = _hrm.addr_to_region(start_address);
636 HeapRegion* last_region = _hrm.addr_to_region(last_address);
637 prev_last_region = last_region;
638
639 while (curr_region != NULL) {
640 assert(curr_region->is_empty() && !curr_region->is_pinned(),
641 "Region already in use (index %u)", curr_region->hrm_index());
642 if (open) {
643 curr_region->set_open_archive();
644 } else {
645 curr_region->set_closed_archive();
646 }
647 _hr_printer.alloc(curr_region);
648 _archive_set.add(curr_region);
649 HeapWord* top;
650 HeapRegion* next_region;
651 if (curr_region != last_region) {
652 top = curr_region->end();
653 next_region = _hrm.next_region_in_heap(curr_region);
654 } else {
655 top = last_address + 1;
656 next_region = NULL;
657 }
658 curr_region->set_top(top);
659 curr_region->set_first_dead(top);
660 curr_region->set_end_of_live(top);
661 curr_region = next_region;
662 }
663
664 // Notify mark-sweep of the archive
665 G1ArchiveAllocator::set_range_archive(curr_range, open);
666 }
667 return true;
668 }
669
670 void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
671 assert(!is_init_completed(), "Expect to be called at JVM init time");
672 assert(ranges != NULL, "MemRegion array NULL");
673 assert(count != 0, "No MemRegions provided");
674 MemRegion reserved = _hrm.reserved();
675 HeapWord *prev_last_addr = NULL;
676 HeapRegion* prev_last_region = NULL;
677
678 // For each MemRegion, create filler objects, if needed, in the G1 regions
679 // that contain the address range. The address range actually within the
680 // MemRegion will not be modified. That is assumed to have been initialized
681 // elsewhere, probably via an mmap of archived heap data.
682 MutexLockerEx x(Heap_lock);
683 for (size_t i = 0; i < count; i++) {
684 HeapWord* start_address = ranges[i].start();
685 HeapWord* last_address = ranges[i].last();
686
687 assert(reserved.contains(start_address) && reserved.contains(last_address),
688 "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
689 p2i(start_address), p2i(last_address));
690 assert(start_address > prev_last_addr,
691 "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
692 p2i(start_address), p2i(prev_last_addr));
693
694 HeapRegion* start_region = _hrm.addr_to_region(start_address);
695 HeapRegion* last_region = _hrm.addr_to_region(last_address);
696 HeapWord* bottom_address = start_region->bottom();
697
698 // Check for a range beginning in the same region in which the
699 // previous one ended.
700 if (start_region == prev_last_region) {
701 bottom_address = prev_last_addr + 1;
702 }
703
704 // Verify that the regions were all marked as archive regions by
705 // alloc_archive_regions.
706 HeapRegion* curr_region = start_region;
707 while (curr_region != NULL) {
708 guarantee(curr_region->is_archive(),
709 "Expected archive region at index %u", curr_region->hrm_index());
710 if (curr_region != last_region) {
711 curr_region = _hrm.next_region_in_heap(curr_region);
712 } else {
713 curr_region = NULL;
714 }
715 }
716
717 prev_last_addr = last_address;
718 prev_last_region = last_region;
719
720 // Fill the memory below the allocated range with dummy object(s),
721 // if the region bottom does not match the range start, or if the previous
722 // range ended within the same G1 region, and there is a gap.
723 if (start_address != bottom_address) {
724 size_t fill_size = pointer_delta(start_address, bottom_address);
725 G1CollectedHeap::fill_with_objects(bottom_address, fill_size);
726 increase_used(fill_size * HeapWordSize);
727 }
728 }
729 }
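// Illustrative example (assuming a 64-bit VM, HeapWordSize == 8): if an
// archive range starts 64 words above its region's bottom, fill_size is
// 64 words, the gap below the range is filled with dummy object(s), and
// used is increased by 64 * 8 = 512 bytes.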
730
731 inline HeapWord* G1CollectedHeap::attempt_allocation(size_t min_word_size,
740 if (result == NULL) {
741 *actual_word_size = desired_word_size;
742 result = attempt_allocation_slow(desired_word_size);
743 }
744
745 assert_heap_not_locked();
746 if (result != NULL) {
747 assert(*actual_word_size != 0, "Actual size must have been set here");
748 dirty_young_block(result, *actual_word_size);
749 } else {
750 *actual_word_size = 0;
751 }
752
753 return result;
754 }
755
756 void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
757 assert(!is_init_completed(), "Expect to be called at JVM init time");
758 assert(ranges != NULL, "MemRegion array NULL");
759 assert(count != 0, "No MemRegions provided");
760 MemRegion reserved = _hrm.reserved();
761 HeapWord* prev_last_addr = NULL;
762 HeapRegion* prev_last_region = NULL;
763 size_t size_used = 0;
764 size_t uncommitted_regions = 0;
765
766   // For each MemRegion, free the G1 regions that constitute it, and
767 // notify mark-sweep that the range is no longer to be considered 'archive.'
768 MutexLockerEx x(Heap_lock);
769 for (size_t i = 0; i < count; i++) {
770 HeapWord* start_address = ranges[i].start();
771 HeapWord* last_address = ranges[i].last();
772
773 assert(reserved.contains(start_address) && reserved.contains(last_address),
774 "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
775 p2i(start_address), p2i(last_address));
776 assert(start_address > prev_last_addr,
777 "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
778 p2i(start_address), p2i(prev_last_addr));
779 size_used += ranges[i].byte_size();
780 prev_last_addr = last_address;
781
782 HeapRegion* start_region = _hrm.addr_to_region(start_address);
783 HeapRegion* last_region = _hrm.addr_to_region(last_address);
784
785 // Check for ranges that start in the same G1 region in which the previous
786 // range ended, and adjust the start address so we don't try to free
787 // the same region again. If the current range is entirely within that
788 // region, skip it.
789 if (start_region == prev_last_region) {
790 start_address = start_region->end();
791 if (start_address > last_address) {
792 continue;
793 }
794 start_region = _hrm.addr_to_region(start_address);
795 }
796 prev_last_region = last_region;
797
798 // After verifying that each region was marked as an archive region by
799 // alloc_archive_regions, set it free and empty and uncommit it.
800 HeapRegion* curr_region = start_region;
801 while (curr_region != NULL) {
802 guarantee(curr_region->is_archive(),
803 "Expected archive region at index %u", curr_region->hrm_index());
804 uint curr_index = curr_region->hrm_index();
805 _archive_set.remove(curr_region);
806 curr_region->set_free();
807 curr_region->set_top(curr_region->bottom());
808 if (curr_region != last_region) {
809 curr_region = _hrm.next_region_in_heap(curr_region);
810 } else {
811 curr_region = NULL;
812 }
813 _hrm.shrink_at(curr_index, 1);
814 uncommitted_regions++;
815 }
816
817 // Notify mark-sweep that this is no longer an archive range.
818 G1ArchiveAllocator::set_range_archive(ranges[i], false);
819 }
820
821 if (uncommitted_regions != 0) {
822 log_debug(gc, ergo, heap)("Attempt heap shrinking (uncommitted archive regions). Total size: " SIZE_FORMAT "B",
823 HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
824 }
825 decrease_used(size_used);
826 }
827
828 oop G1CollectedHeap::materialize_archived_object(oop obj) {
829 assert(obj != NULL, "archived obj is NULL");
830 assert(G1ArchiveAllocator::is_archived_object(obj), "must be archived object");
831
832 // Loading an archived object makes it strongly reachable. If it is
833 // loaded during concurrent marking, it must be enqueued to the SATB
1008 _ref_processor_cm->verify_no_references_recorded();
1009
1010 // Abandon current iterations of concurrent marking and concurrent
1011 // refinement, if any are in progress.
1012 concurrent_mark()->concurrent_cycle_abort();
1013 }
1014
1015 void G1CollectedHeap::prepare_heap_for_full_collection() {
1016 // Make sure we'll choose a new allocation region afterwards.
1017 _allocator->release_mutator_alloc_region();
1018 _allocator->abandon_gc_alloc_regions();
1019 g1_rem_set()->cleanupHRRS();
1020
1021 // We may have added regions to the current incremental collection
1022 // set between the last GC or pause and now. We need to clear the
1023 // incremental collection set and then start rebuilding it afresh
1024 // after this full GC.
1025 abandon_collection_set(collection_set());
1026
1027 tear_down_region_sets(false /* free_list_only */);
1028 }
1029
1030 void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) {
1031 assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
1032 assert(used() == recalculate_used(), "Should be equal");
1033 _verifier->verify_region_sets_optional();
1034 _verifier->verify_before_gc(G1HeapVerifier::G1VerifyFull);
1035 _verifier->check_bitmaps("Full GC Start");
1036 }
1037
1038 void G1CollectedHeap::prepare_heap_for_mutators() {
1039 // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1040 ClassLoaderDataGraph::purge();
1041 MetaspaceUtils::verify_metrics();
1042
1043 // Prepare heap for normal collections.
1044 assert(num_free_regions() == 0, "we should not have added any free regions");
1045 rebuild_region_sets(false /* free_list_only */);
1046 abort_refinement();
1047 resize_heap_if_necessary();
1048
1049 // Rebuild the strong code root lists for each region
1050 rebuild_strong_code_roots();
1051
1052 // Purge code root memory
1053 purge_code_root_memory();
1054
1055 // Start a new incremental collection set for the next pause
1056 start_new_collection_set();
1057
1058 _allocator->init_mutator_alloc_region();
1059
1060 // Post collection state updates.
1061 MetaspaceGC::compute_new_size();
1062 }
1063
1064 void G1CollectedHeap::abort_refinement() {
1065 if (_hot_card_cache->use_cache()) {
1066 _hot_card_cache->reset_hot_cache();
1067 }
1068
1069 // Discard all remembered set updates.
1070 G1BarrierSet::dirty_card_queue_set().abandon_logs();
1071 assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
1072 }
1073
1074 void G1CollectedHeap::verify_after_full_collection() {
1075 _hrm.verify_optional();
1076 _verifier->verify_region_sets_optional();
1077 _verifier->verify_after_gc(G1HeapVerifier::G1VerifyFull);
1078 // Clear the previous marking bitmap, if needed for bitmap verification.
1079 // Note we cannot do this when we clear the next marking bitmap in
1080 // G1ConcurrentMark::abort() above since VerifyDuringGC verifies the
1081 // objects marked during a full GC against the previous bitmap.
1082 // But we need to clear it before calling check_bitmaps below since
1083 // the full GC has compacted objects and updated TAMS but not updated
1084 // the prev bitmap.
1085 if (G1VerifyBitmaps) {
1086 GCTraceTime(Debug, gc)("Clear Prev Bitmap for Verification");
1087 _cm->clear_prev_bitmap(workers());
1088 }
1089 // This call implicitly verifies that the next bitmap is clear after Full GC.
1090 _verifier->check_bitmaps("Full GC End");
1091
1092 // At this point there should be no regions in the
1093 // entire heap tagged as young.
1094 assert(check_young_list_empty(), "young list should be empty at this point");
1095
1307 // appropriate.
1308 return NULL;
1309 }
1310
1311 // Attempt to expand the heap sufficiently
1312 // to support an allocation of the given "word_size". If
1313 // successful, perform the allocation and return the address of the
1314 // allocated block, or else "NULL".
1315
1316 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
1317 assert_at_safepoint_on_vm_thread();
1318
1319 _verifier->verify_region_sets_optional();
1320
1321 size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1322 log_debug(gc, ergo, heap)("Attempt heap expansion (allocation request failed). Allocation request: " SIZE_FORMAT "B",
1323 word_size * HeapWordSize);
1324
1325
1326 if (expand(expand_bytes, _workers)) {
1327 _hrm.verify_optional();
1328 _verifier->verify_region_sets_optional();
1329 return attempt_allocation_at_safepoint(word_size,
1330 false /* expect_null_mutator_alloc_region */);
1331 }
1332 return NULL;
1333 }
1334
1335 bool G1CollectedHeap::expand(size_t expand_bytes, WorkGang* pretouch_workers, double* expand_time_ms) {
1336 size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1337 aligned_expand_bytes = align_up(aligned_expand_bytes,
1338 HeapRegion::GrainBytes);
1339
1340 log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: " SIZE_FORMAT "B expansion amount: " SIZE_FORMAT "B",
1341 expand_bytes, aligned_expand_bytes);
1342
1343 if (is_maximal_no_gc()) {
1344 log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
1345 return false;
1346 }
1347
1348 double expand_heap_start_time_sec = os::elapsedTime();
1349 uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
1350 assert(regions_to_expand > 0, "Must expand by at least one region");
1351
1352 uint expanded_by = _hrm.expand_by(regions_to_expand, pretouch_workers);
1353 if (expand_time_ms != NULL) {
1354 *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
1355 }
1356
1357 if (expanded_by > 0) {
1358 size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
1359 assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1360 g1_policy()->record_new_heap_size(num_regions());
1361 } else {
1362 log_debug(gc, ergo, heap)("Did not expand the heap (heap expansion operation failed)");
1363
1364 // The expansion of the virtual storage space was unsuccessful.
1365 // Let's see if it was because we ran out of swap.
1366 if (G1ExitOnExpansionFailure &&
1367 _hrm.available() >= regions_to_expand) {
1368 // We had head room...
1369 vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
1370 }
1371 }
1372 return regions_to_expand > 0;
1373 }
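// Illustrative sizing (assuming 4 KB pages and a 1 MB region size): an
// expand_bytes request of 1.5 MB is already page aligned, then gets rounded
// up to GrainBytes, giving aligned_expand_bytes == 2 MB and
// regions_to_expand == 2.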
1374
1375 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1376 size_t aligned_shrink_bytes =
1377 ReservedSpace::page_align_size_down(shrink_bytes);
1378 aligned_shrink_bytes = align_down(aligned_shrink_bytes,
1379 HeapRegion::GrainBytes);
1380 uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
1381
1382 uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
1383 size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
1384
1385
1386 log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",
1387 shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
1388 if (num_regions_removed > 0) {
1389 g1_policy()->record_new_heap_size(num_regions());
1390 } else {
1391     log_debug(gc, ergo, heap)("Did not shrink the heap (heap shrinking operation failed)");
1392 }
1393 }
1394
1395 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1396 _verifier->verify_region_sets_optional();
1397
1398 // We should only reach here at the end of a Full GC or during Remark which
1399 // means we should not be holding on to any GC alloc regions. The method
1400 // below will make sure of that and do any remaining clean up.
1401 _allocator->abandon_gc_alloc_regions();
1402
1403 // Instead of tearing down / rebuilding the free lists here, we
1404 // could instead use the remove_all_pending() method on free_list to
1405 // remove only the ones that we need to remove.
1406 tear_down_region_sets(true /* free_list_only */);
1407 shrink_helper(shrink_bytes);
1408 rebuild_region_sets(true /* free_list_only */);
1409
1410 _hrm.verify_optional();
1411 _verifier->verify_region_sets_optional();
1412 }
1413
1414 class OldRegionSetChecker : public HeapRegionSetChecker {
1415 public:
1416 void check_mt_safety() {
1417 // Master Old Set MT safety protocol:
1418 // (a) If we're at a safepoint, operations on the master old set
1419 // should be invoked:
1420 // - by the VM thread (which will serialize them), or
1421 // - by the GC workers while holding the FreeList_lock, if we're
1422 // at a safepoint for an evacuation pause (this lock is taken
1423 //     anyway when a GC alloc region is retired so that a new one
1424 // is allocated from the free list), or
1425 // - by the GC workers while holding the OldSets_lock, if we're at a
1426 // safepoint for a cleanup pause.
1427 // (b) If we're not at a safepoint, operations on the master old set
1428 // should be invoked while holding the Heap_lock.
1429
1430 if (SafepointSynchronize::is_at_safepoint()) {
1468 guarantee(Heap_lock->owned_by_self(),
1469 "master humongous set MT safety protocol outside a safepoint");
1470 }
1471 }
1472 bool is_correct_type(HeapRegion* hr) { return hr->is_humongous(); }
1473 const char* get_description() { return "Humongous Regions"; }
1474 };
1475
1476 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
1477 CollectedHeap(),
1478 _young_gen_sampling_thread(NULL),
1479 _workers(NULL),
1480 _collector_policy(collector_policy),
1481 _card_table(NULL),
1482 _soft_ref_policy(),
1483 _old_set("Old Region Set", new OldRegionSetChecker()),
1484 _archive_set("Archive Region Set", new ArchiveRegionSetChecker()),
1485 _humongous_set("Humongous Region Set", new HumongousRegionSetChecker()),
1486 _bot(NULL),
1487 _listener(),
1488 _hrm(),
1489 _allocator(NULL),
1490 _verifier(NULL),
1491 _summary_bytes_used(0),
1492 _archive_allocator(NULL),
1493 _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
1494 _old_evac_stats("Old", OldPLABSize, PLABWeight),
1495 _expand_heap_after_alloc_failure(true),
1496 _g1mm(NULL),
1497 _humongous_reclaim_candidates(),
1498 _has_humongous_reclaim_candidates(false),
1499 _hr_printer(),
1500 _collector_state(),
1501 _old_marking_cycles_started(0),
1502 _old_marking_cycles_completed(0),
1503 _eden(),
1504 _survivor(),
1505 _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1506 _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1507 _g1_policy(new G1Policy(_gc_timer_stw)),
1508 _heap_sizing_policy(NULL),
1601 return JNI_ENOMEM;
1602 }
1603 return JNI_OK;
1604 }
1605
1606 jint G1CollectedHeap::initialize() {
1607 os::enable_vtime();
1608
1609 // Necessary to satisfy locking discipline assertions.
1610
1611 MutexLocker x(Heap_lock);
1612
1613 // While there are no constraints in the GC code that HeapWordSize
1614 // be any particular value, there are multiple other areas in the
1615 // system which believe this to be true (e.g. oop->object_size in some
1616 // cases incorrectly returns the size in wordSize units rather than
1617 // HeapWordSize).
1618 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1619
1620 size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1621 size_t max_byte_size = collector_policy()->max_heap_byte_size();
1622 size_t heap_alignment = collector_policy()->heap_alignment();
1623
1624 // Ensure that the sizes are properly aligned.
1625 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1626 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1627 Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
1628
1629 // Reserve the maximum.
1630
1631 // When compressed oops are enabled, the preferred heap base
1632 // is calculated by subtracting the requested size from the
1633 // 32Gb boundary and using the result as the base address for
1634 // heap reservation. If the requested size is not aligned to
1635 // HeapRegion::GrainBytes (i.e. the alignment that is passed
1636 // into the ReservedHeapSpace constructor) then the actual
1637 // base of the reserved heap may end up differing from the
1638 // address that was requested (i.e. the preferred heap base).
1639 // If this happens then we could end up using a non-optimal
1640 // compressed oops mode.
1641
1665 G1BarrierSet::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
1666 &bs->dirty_card_queue_buffer_allocator(),
1667 -1, // temp. never trigger
1668 -1, // temp. no limit
1669 Shared_DirtyCardQ_lock,
1670 true); // init_free_ids
1671
1672 dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
1673 &bs->dirty_card_queue_buffer_allocator(),
1674 -1, // never trigger processing
1675 -1, // no limit on length
1676 Shared_DirtyCardQ_lock);
1677
1678 // Create the hot card cache.
1679 _hot_card_cache = new G1HotCardCache(this);
1680
1681 // Carve out the G1 part of the heap.
1682 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
1683 size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
1684 G1RegionToSpaceMapper* heap_storage =
1685 G1RegionToSpaceMapper::create_mapper(g1_rs,
1686 g1_rs.size(),
1687 page_size,
1688 HeapRegion::GrainBytes,
1689 1,
1690 mtJavaHeap);
1691 os::trace_page_sizes("Heap",
1692 collector_policy()->min_heap_byte_size(),
1693 max_byte_size,
1694 page_size,
1695 heap_rs.base(),
1696 heap_rs.size());
1697 heap_storage->set_mapping_changed_listener(&_listener);
1698
1699 // Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps.
1700 G1RegionToSpaceMapper* bot_storage =
1701 create_aux_memory_mapper("Block Offset Table",
1702 G1BlockOffsetTable::compute_size(g1_rs.size() / HeapWordSize),
1703 G1BlockOffsetTable::heap_map_factor());
1704
1705 G1RegionToSpaceMapper* cardtable_storage =
1706 create_aux_memory_mapper("Card Table",
1707 G1CardTable::compute_size(g1_rs.size() / HeapWordSize),
1708 G1CardTable::heap_map_factor());
1709
1710 G1RegionToSpaceMapper* card_counts_storage =
1711 create_aux_memory_mapper("Card Counts Table",
1712 G1CardCounts::compute_size(g1_rs.size() / HeapWordSize),
1713 G1CardCounts::heap_map_factor());
1714
1715 size_t bitmap_size = G1CMBitMap::compute_size(g1_rs.size());
1716 G1RegionToSpaceMapper* prev_bitmap_storage =
1717 create_aux_memory_mapper("Prev Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
1718 G1RegionToSpaceMapper* next_bitmap_storage =
1719 create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
1720
1721 _hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
1722 _card_table->initialize(cardtable_storage);
1723 // Do later initialization work for concurrent refinement.
1724 _hot_card_cache->initialize(card_counts_storage);
1725
1726 // 6843694 - ensure that the maximum region index can fit
1727 // in the remembered set structures.
1728 const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
1729 guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
1730
1731 // The G1FromCardCache reserves card with value 0 as "invalid", so the heap must not
1732 // start within the first card.
1733 guarantee(g1_rs.base() >= (char*)G1CardTable::card_size, "Java heap must not start within the first card.");
1734 // Also create a G1 rem set.
1735 _g1_rem_set = new G1RemSet(this, _card_table, _hot_card_cache);
1736 _g1_rem_set->initialize(max_capacity(), max_regions());
1737
1738 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
1739 guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
1740 guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
1741 "too many cards per region");
1742
1743 FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
1744
1745 _bot = new G1BlockOffsetTable(reserved_region(), bot_storage);
1746
1747 {
1748 HeapWord* start = _hrm.reserved().start();
1749 HeapWord* end = _hrm.reserved().end();
1750 size_t granularity = HeapRegion::GrainBytes;
1751
1752 _in_cset_fast_test.initialize(start, end, granularity);
1753 _humongous_reclaim_candidates.initialize(start, end, granularity);
1754 }
1755
1756 // Create the G1ConcurrentMark data structure and thread.
1757 // (Must do this late, so that "max_regions" is defined.)
1758 _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
1759 if (_cm == NULL || !_cm->completed_initialization()) {
1760 vm_shutdown_during_initialization("Could not create/initialize G1ConcurrentMark");
1761 return JNI_ENOMEM;
1762 }
1763 _cm_thread = _cm->cm_thread();
1764
1765 // Now expand into the initial heap size.
1766 if (!expand(init_byte_size, _workers)) {
1767 vm_shutdown_during_initialization("Failed to allocate initial heap.");
1768 return JNI_ENOMEM;
1769 }
1770
1771 // Perform any initialization actions delegated to the policy.
1772 g1_policy()->init(this, &_collection_set);
1773
1774 jint ecode = initialize_concurrent_refinement();
1775 if (ecode != JNI_OK) {
1776 return ecode;
1777 }
1778
1779 ecode = initialize_young_gen_sampling_thread();
1780 if (ecode != JNI_OK) {
1781 return ecode;
1782 }
1783
1784 {
1785 DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
1786 dcqs.set_process_completed_threshold((int)concurrent_refine()->yellow_zone());
1787 dcqs.set_max_completed_queue((int)concurrent_refine()->red_zone());
1788 }
1789
1790 // Here we allocate the dummy HeapRegion that is required by the
1791 // G1AllocRegion class.
1792 HeapRegion* dummy_region = _hrm.get_dummy_region();
1793
1794 // We'll re-use the same region whether the alloc region will
1795 // require BOT updates or not and, if it doesn't, then a non-young
1796 // region will complain that it cannot support allocations without
1797 // BOT updates. So we'll tag the dummy region as eden to avoid that.
1798 dummy_region->set_eden();
1799 // Make sure it's full.
1800 dummy_region->set_top(dummy_region->end());
1801 G1AllocRegion::setup(this, dummy_region);
1802
1803 _allocator->init_mutator_alloc_region();
1804
1805 // Create the monitoring and management support here, so that the heap
1806 // values it reads have already been properly initialized.
1807 _g1mm = new G1MonitoringSupport(this);
1808
1809 G1StringDedup::initialize();
1810
1811 _preserved_marks_set.init(ParallelGCThreads);
1812
1892 false, // Reference discovery is not atomic
1893 &_is_alive_closure_cm, // is alive closure
1894 true); // allow changes to number of processing threads
1895
1896 // STW ref processor
1897 _ref_processor_stw =
1898 new ReferenceProcessor(&_is_subject_to_discovery_stw,
1899 mt_processing, // mt processing
1900 ParallelGCThreads, // degree of mt processing
1901 (ParallelGCThreads > 1), // mt discovery
1902 ParallelGCThreads, // degree of mt discovery
1903 true, // Reference discovery is atomic
1904 &_is_alive_closure_stw, // is alive closure
1905 true); // allow changes to number of processing threads
1906 }
1907
1908 CollectorPolicy* G1CollectedHeap::collector_policy() const {
1909 return _collector_policy;
1910 }
1911
1912 SoftRefPolicy* G1CollectedHeap::soft_ref_policy() {
1913 return &_soft_ref_policy;
1914 }
1915
1916 size_t G1CollectedHeap::capacity() const {
1917 return _hrm.length() * HeapRegion::GrainBytes;
1918 }
1919
1920 size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
1921 return _hrm.total_free_bytes();
1922 }
1923
1924 void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
1925 _hot_card_cache->drain(cl, worker_i);
1926 }
1927
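// Apply the given closure to all completed dirty card buffers on behalf of
// worker_i, then record the number of buffers processed under the UpdateRS
// phase times.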
1928 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl, uint worker_i) {
1929 DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
1930 size_t n_completed_buffers = 0;
1931 while (dcqs.apply_closure_during_gc(cl, worker_i)) {
1932 n_completed_buffers++;
1933 }
1934 g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers, G1GCPhaseTimes::UpdateRSProcessedBuffers);
1935 dcqs.clear_n_completed_buffers();
1936 assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
1937 }
1938
1939 // Computes the sum of the storage used by the various regions.
1940 size_t G1CollectedHeap::used() const {
1941 size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
2116 DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
2117
2118 // Schedule a standard evacuation pause. We're setting word_size
2119 // to 0 which means that we are not requesting a post-GC allocation.
2120 VM_G1CollectForAllocation op(0, /* word_size */
2121 gc_count_before,
2122 cause,
2123 false, /* should_initiate_conc_mark */
2124 g1_policy()->max_pause_time_ms());
2125 VMThread::execute(&op);
2126 } else {
2127 // Schedule a Full GC.
2128 VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
2129 VMThread::execute(&op);
2130 }
2131 }
2132 } while (retry_gc);
2133 }
2134
2135 bool G1CollectedHeap::is_in(const void* p) const {
2136 if (_hrm.reserved().contains(p)) {
2137 // Given that we know that p is in the reserved space,
2138 // heap_region_containing() should successfully
2139 // return the containing region.
2140 HeapRegion* hr = heap_region_containing(p);
2141 return hr->is_in(p);
2142 } else {
2143 return false;
2144 }
2145 }
2146
2147 #ifdef ASSERT
2148 bool G1CollectedHeap::is_in_exact(const void* p) const {
2149 bool contains = reserved_region().contains(p);
2150 bool available = _hrm.is_available(addr_to_region((HeapWord*)p));
2151 if (contains && available) {
2152 return true;
2153 } else {
2154 return false;
2155 }
2156 }
2157 #endif
2158
2159 // Iteration functions.
2160
2161 // Iterates an ObjectClosure over all objects within a HeapRegion.
2162
2163 class IterateObjectClosureRegionClosure: public HeapRegionClosure {
2164 ObjectClosure* _cl;
2165 public:
2166 IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
2167 bool do_heap_region(HeapRegion* r) {
2168 if (!r->is_continues_humongous()) {
2169 r->object_iterate(_cl);
2170 }
2171 return false;
2172 }
2173 };
2174
2175 void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
2176 IterateObjectClosureRegionClosure blk(cl);
2177 heap_region_iterate(&blk);
2178 }
2179
2180 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
2181 _hrm.iterate(cl);
2182 }
2183
2184 void G1CollectedHeap::heap_region_par_iterate_from_worker_offset(HeapRegionClosure* cl,
2185 HeapRegionClaimer *hrclaimer,
2186 uint worker_id) const {
2187 _hrm.par_iterate(cl, hrclaimer, hrclaimer->offset_for_worker(worker_id));
2188 }
2189
2190 void G1CollectedHeap::heap_region_par_iterate_from_start(HeapRegionClosure* cl,
2191 HeapRegionClaimer *hrclaimer) const {
2192 _hrm.par_iterate(cl, hrclaimer, 0);
2193 }
2194
2195 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
2196 _collection_set.iterate(cl);
2197 }
2198
2199 void G1CollectedHeap::collection_set_iterate_from(HeapRegionClosure *cl, uint worker_id) {
2200 _collection_set.iterate_from(cl, worker_id, workers()->active_workers());
2201 }
2202
2203 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
2204 HeapRegion* hr = heap_region_containing(addr);
2205 return hr->block_start(addr);
2206 }
2207
2208 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
2209 HeapRegion* hr = heap_region_containing(addr);
2210 return hr->block_size(addr);
2211 }
2212
2221
2222 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
2223 return (_g1_policy->young_list_target_length() - _survivor.length()) * HeapRegion::GrainBytes;
2224 }
2225
2226 size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
2227 return _eden.length() * HeapRegion::GrainBytes;
2228 }
2229
2230 // For G1, TLABs should not contain humongous objects, so the maximum TLAB size
2231 // must be equal to the humongous object limit.
2232 size_t G1CollectedHeap::max_tlab_size() const {
2233 return align_down(_humongous_object_threshold_in_words, MinObjAlignment);
2234 }
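// Illustrative value (assuming a 1 MB region size and the default 8-byte
// object alignment): the humongous threshold is half a region, so
// _humongous_object_threshold_in_words is 65536 words (512 KB), which is
// already aligned and is returned unchanged.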
2235
2236 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2237 return _allocator->unsafe_max_tlab_alloc();
2238 }
2239
2240 size_t G1CollectedHeap::max_capacity() const {
2241 return _hrm.reserved().byte_size();
2242 }
2243
2244 jlong G1CollectedHeap::millis_since_last_gc() {
2245 // See the notes in GenCollectedHeap::millis_since_last_gc()
2246 // for more information about the implementation.
2247 jlong ret_val = (os::javaTimeNanos() / NANOSECS_PER_MILLISEC) -
2248 _g1_policy->collection_pause_end_millis();
2249 if (ret_val < 0) {
2250 log_warning(gc)("millis_since_last_gc() would return : " JLONG_FORMAT
2251 ". returning zero instead.", ret_val);
2252 return 0;
2253 }
2254 return ret_val;
2255 }
2256
2257 void G1CollectedHeap::deduplicate_string(oop str) {
2258 assert(java_lang_String::is_instance(str), "invariant");
2259
2260 if (G1StringDedup::is_enabled()) {
2261 G1StringDedup::deduplicate(str);
2311 case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
2312 case VerifyOption_G1UseFullMarking: return is_obj_dead_full(obj);
2313 default: ShouldNotReachHere();
2314 }
2315 return false; // keep some compilers happy
2316 }
2317
2318 void G1CollectedHeap::print_heap_regions() const {
2319 LogTarget(Trace, gc, heap, region) lt;
2320 if (lt.is_enabled()) {
2321 LogStream ls(lt);
2322 print_regions_on(&ls);
2323 }
2324 }
2325
2326 void G1CollectedHeap::print_on(outputStream* st) const {
2327 st->print(" %-20s", "garbage-first heap");
2328 st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
2329 capacity()/K, used_unlocked()/K);
2330 st->print(" [" PTR_FORMAT ", " PTR_FORMAT ")",
2331 p2i(_hrm.reserved().start()),
2332 p2i(_hrm.reserved().end()));
2333 st->cr();
2334 st->print(" region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
2335 uint young_regions = young_regions_count();
2336 st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
2337 (size_t) young_regions * HeapRegion::GrainBytes / K);
2338 uint survivor_regions = survivor_regions_count();
2339 st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
2340 (size_t) survivor_regions * HeapRegion::GrainBytes / K);
2341 st->cr();
2342 MetaspaceUtils::print_on(st);
2343 }
2344
2345 void G1CollectedHeap::print_regions_on(outputStream* st) const {
2346 st->print_cr("Heap Regions: E=young(eden), S=young(survivor), O=old, "
2347 "HS=humongous(starts), HC=humongous(continues), "
2348 "CS=collection set, F=free, A=archive, "
2349 "TAMS=top-at-mark-start (previous, next)");
2350 PrintRegionClosure blk(st);
2351 heap_region_iterate(&blk);
2352 }
2487 // This summary needs to be printed before incrementing total collections.
2488 g1_rem_set()->print_periodic_summary_info("Before GC RS summary", total_collections());
2489
2490 // Update common counters.
2491 increment_total_collections(full /* full gc */);
2492 if (full) {
2493 increment_old_marking_cycles_started();
2494 }
2495
2496 // Fill TLAB's and such
2497 double start = os::elapsedTime();
2498 ensure_parsability(true);
2499 g1_policy()->phase_times()->record_prepare_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
2500 }
2501
2502 void G1CollectedHeap::gc_epilogue(bool full) {
2503 // Update common counters.
2504 if (full) {
2505 // Update the number of full collections that have been completed.
2506 increment_old_marking_cycles_completed(false /* concurrent */);
2507 }
2508
2509 // We are at the end of the GC. Total collections has already been increased.
2510 g1_rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);
2511
2512 // FIXME: what is this about?
2513 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
2514 // is set.
2515 #if COMPILER2_OR_JVMCI
2516 assert(DerivedPointerTable::is_empty(), "derived pointer present");
2517 #endif
2518 // always_do_update_barrier = true;
2519
2520 double start = os::elapsedTime();
2521 resize_all_tlabs();
2522 g1_policy()->phase_times()->record_resize_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
2523
2524 MemoryService::track_memory_usage();
2525 // We have just completed a GC. Update the soft reference
2526 // policy with the new heap occupancy
3098 #ifdef TRACESPINNING
3099 ParallelTaskTerminator::print_termination_counts();
3100 #endif
3101
3102 gc_epilogue(false);
3103 }
3104
3105 // Print the remainder of the GC log output.
3106 if (evacuation_failed()) {
3107 log_info(gc)("To-space exhausted");
3108 }
3109
3110 g1_policy()->print_phases();
3111 heap_transition.print();
3112
3113   // It is not yet safe to tell the concurrent mark to
3114 // start as we have some optional output below. We don't want the
3115 // output from the concurrent mark thread interfering with this
3116 // logging output either.
3117
3118 _hrm.verify_optional();
3119 _verifier->verify_region_sets_optional();
3120
3121 TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
3122 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
3123
3124 print_heap_after_gc();
3125 print_heap_regions();
3126 trace_heap_after_gc(_gc_tracer_stw);
3127
3128 // We must call G1MonitoringSupport::update_sizes() in the same scoping level
3129 // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
3130 // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
3131 // before any GC notifications are raised.
3132 g1mm()->update_sizes();
3133
3134 _gc_tracer_stw->report_evacuation_info(&evacuation_info);
3135 _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
3136 _gc_timer_stw->register_gc_end();
3137 _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
3138 }
3775 DerivedPointerTable::update_pointers();
3776 g1_policy()->phase_times()->record_derived_pointer_table_update_time((os::elapsedTime() - start) * 1000.0);
3777 #endif
3778 g1_policy()->print_age_table();
3779 }
3780
3781 void G1CollectedHeap::record_obj_copy_mem_stats() {
3782 g1_policy()->add_bytes_allocated_in_old_since_last_gc(_old_evac_stats.allocated() * HeapWordSize);
3783
3784 _gc_tracer_stw->report_evacuation_statistics(create_g1_evac_summary(&_survivor_evac_stats),
3785 create_g1_evac_summary(&_old_evac_stats));
3786 }
3787
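// Return a region to the given free list: clear its prev bitmap range when
// verifying bitmaps, reset its hot card cache counts when applicable, clear
// the region itself, notify the remembered set tracker, and add the region
// to the free list in address order.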
3788 void G1CollectedHeap::free_region(HeapRegion* hr,
3789 FreeRegionList* free_list,
3790 bool skip_remset,
3791 bool skip_hot_card_cache,
3792 bool locked) {
3793 assert(!hr->is_free(), "the region should not be free");
3794 assert(!hr->is_empty(), "the region should not be empty");
3795 assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
3796 assert(free_list != NULL, "pre-condition");
3797
3798 if (G1VerifyBitmaps) {
3799 MemRegion mr(hr->bottom(), hr->end());
3800 concurrent_mark()->clear_range_in_prev_bitmap(mr);
3801 }
3802
3803 // Clear the card counts for this region.
3804 // Note: we only need to do this if the region is not young
3805 // (since we don't refine cards in young regions).
3806 if (!skip_hot_card_cache && !hr->is_young()) {
3807 _hot_card_cache->reset_card_counts(hr);
3808 }
3809 hr->hr_clear(skip_remset, true /* clear_space */, locked /* locked */);
3810 _g1_policy->remset_tracker()->update_at_free(hr);
3811 free_list->add_ordered(hr);
3812 }
3813
3814 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
3815 FreeRegionList* free_list) {
3816 assert(hr->is_humongous(), "this is only for humongous regions");
3817 assert(free_list != NULL, "pre-condition");
3818 hr->clear_humongous();
3819 free_region(hr, free_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
3820 }
3821
3822 void G1CollectedHeap::remove_from_old_sets(const uint old_regions_removed,
3823 const uint humongous_regions_removed) {
3824 if (old_regions_removed > 0 || humongous_regions_removed > 0) {
3825 MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
3826 _old_set.bulk_remove(old_regions_removed);
3827 _humongous_set.bulk_remove(humongous_regions_removed);
3828 }
3829
3830 }
3831
3832 void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
3833 assert(list != NULL, "list can't be null");
3834 if (!list->is_empty()) {
3835 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
3836 _hrm.insert_list_into_free_list(list);
3837 }
3838 }
3839
3840 void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {
3841 decrease_used(bytes);
3842 }
3843
3844 class G1FreeCollectionSetTask : public AbstractGangTask {
3845 private:
3846
3847 // Closure applied to all regions in the collection set to do work that needs to
3848 // be done serially in a single thread.
3849 class G1SerialFreeCollectionSetClosure : public HeapRegionClosure {
3850 private:
3851 EvacuationInfo* _evacuation_info;
3852 const size_t* _surviving_young_words;
3853
3854 // Bytes used in successfully evacuated regions before the evacuation.
3855 size_t _before_used_bytes;
3856 // Bytes used in unsucessfully evacuated regions before the evacuation
3896 assert(r->not_empty(), "Region %u is an empty region in the collection set.", r->hrm_index());
3897 _before_used_bytes += r->used();
3898 g1h->free_region(r,
3899 &_local_free_list,
3900 true, /* skip_remset */
3901 true, /* skip_hot_card_cache */
3902 true /* locked */);
3903 } else {
3904 r->uninstall_surv_rate_group();
3905 r->set_young_index_in_cset(-1);
3906 r->set_evacuation_failed(false);
3907 // When moving a young gen region to old gen, we "allocate" that whole region
3908 // there. This is in addition to any already evacuated objects. Notify the
3909 // policy about that.
3910 // Old gen regions do not cause an additional allocation: both the objects
3911 // still in the region and the ones already moved are accounted for elsewhere.
3912 if (r->is_young()) {
3913 _bytes_allocated_in_old_since_last_gc += HeapRegion::GrainBytes;
3914 }
3915 // The region is now considered to be old.
3916 r->set_old();
3917 // Do some allocation statistics accounting. Regions that failed evacuation
3918 // are always made old, so there is no need to update anything in the young
3919 // gen statistics, but we need to update old gen statistics.
3920 size_t used_words = r->marked_bytes() / HeapWordSize;
3921
3922 _failure_used_words += used_words;
3923 _failure_waste_words += HeapRegion::GrainWords - used_words;
3924
3925 g1h->old_set_add(r);
3926 _after_used_bytes += r->used();
3927 }
3928 return false;
3929 }
3930
3931 void complete_work() {
3932 G1CollectedHeap* g1h = G1CollectedHeap::heap();
3933
3934 _evacuation_info->set_regions_freed(_local_free_list.length());
3935 _evacuation_info->increment_collectionset_used_after(_after_used_bytes);
3936
4349 return false;
4350 }
4351
4352 ~TearDownRegionSetsClosure() {
4353 assert(_old_set->is_empty(), "post-condition");
4354 }
4355 };
4356
4357 void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
4358 assert_at_safepoint_on_vm_thread();
4359
4360 if (!free_list_only) {
4361 TearDownRegionSetsClosure cl(&_old_set);
4362 heap_region_iterate(&cl);
4363
4364 // Note that emptying the _young_list is postponed and instead done as
4365   //   the first step when rebuilding the region sets again. The reason for
4366 // this is that during a full GC string deduplication needs to know if
4367 // a collected region was young or old when the full GC was initiated.
4368 }
4369 _hrm.remove_all_free_regions();
4370 }
4371
4372 void G1CollectedHeap::increase_used(size_t bytes) {
4373 _summary_bytes_used += bytes;
4374 }
4375
4376 void G1CollectedHeap::decrease_used(size_t bytes) {
4377 assert(_summary_bytes_used >= bytes,
4378 "invariant: _summary_bytes_used: " SIZE_FORMAT " should be >= bytes: " SIZE_FORMAT,
4379 _summary_bytes_used, bytes);
4380 _summary_bytes_used -= bytes;
4381 }
4382
4383 void G1CollectedHeap::set_used(size_t bytes) {
4384 _summary_bytes_used = bytes;
4385 }
4386
4387 class RebuildRegionSetsClosure : public HeapRegionClosure {
4388 private:
4389 bool _free_list_only;
4424 }
4425 _total_used += r->used();
4426 }
4427
4428 return false;
4429 }
4430
4431 size_t total_used() {
4432 return _total_used;
4433 }
4434 };
4435
4436 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
4437 assert_at_safepoint_on_vm_thread();
4438
4439 if (!free_list_only) {
4440 _eden.clear();
4441 _survivor.clear();
4442 }
4443
4444 RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
4445 heap_region_iterate(&cl);
4446
4447 if (!free_list_only) {
4448 set_used(cl.total_used());
4449 if (_archive_allocator != NULL) {
4450 _archive_allocator->clear_used();
4451 }
4452 }
4453 assert(used() == recalculate_used(),
4454 "inconsistent used(), value: " SIZE_FORMAT " recalculated: " SIZE_FORMAT,
4455 used(), recalculate_used());
4456 }
4457
4458 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
4459 HeapRegion* hr = heap_region_containing(p);
4460 return hr->is_in(p);
4461 }
4462
4463 // Methods for the mutator alloc region
4464
4532 new_alloc_region->note_start_of_copying(during_im);
4533 return new_alloc_region;
4534 }
4535 return NULL;
4536 }
4537
4538 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
4539 size_t allocated_bytes,
4540 InCSetState dest) {
4541 bool during_im = collector_state()->in_initial_mark_gc();
4542 alloc_region->note_end_of_copying(during_im);
4543 g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
4544 if (dest.is_old()) {
4545 old_set_add(alloc_region);
4546 }
4547 _hr_printer.retire(alloc_region);
4548 }
4549
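// Find the free or expandable region with the highest index, expanding the
// heap to commit it if necessary, then allocate it from the free list.
// Returns NULL if no such region exists.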
4550 HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
4551 bool expanded = false;
4552 uint index = _hrm.find_highest_free(&expanded);
4553
4554 if (index != G1_NO_HRM_INDEX) {
4555 if (expanded) {
4556 log_debug(gc, ergo, heap)("Attempt heap expansion (requested address range outside heap bounds). region size: " SIZE_FORMAT "B",
4557 HeapRegion::GrainWords * HeapWordSize);
4558 }
4559 _hrm.allocate_free_regions_starting_at(index, 1);
4560 return region_at(index);
4561 }
4562 return NULL;
4563 }
4564
4565 // Optimized nmethod scanning
4566
4567 class RegisterNMethodOopClosure: public OopClosure {
4568 G1CollectedHeap* _g1h;
4569 nmethod* _nm;
4570
4571 template <class T> void do_oop_work(T* p) {
4572 T heap_oop = RawAccess<>::oop_load(p);
4573 if (!CompressedOops::is_null(heap_oop)) {
4574 oop obj = CompressedOops::decode_not_null(heap_oop);
4575 HeapRegion* hr = _g1h->heap_region_containing(obj);
4576 assert(!hr->is_continues_humongous(),
4577 "trying to add code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
4578 " starting at " HR_FORMAT,
4579 p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()));
|
149
150 void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
151 // The from card cache is not the memory that is actually committed. So we cannot
152 // take advantage of the zero_filled parameter.
153 reset_from_card_cache(start_idx, num_regions);
154 }
155
156
157 HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
158 MemRegion mr) {
159 return new HeapRegion(hrs_index, bot(), mr);
160 }
161
162 // Private methods.
163
164 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
165 assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
166 "the only time we use this to allocate a humongous region is "
167 "when we are allocating a single humongous region");
168
169 HeapRegion* res = _hrm->allocate_free_region(is_old);
170
171 if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
172 // Currently, only attempts to allocate GC alloc regions set
173 // do_expand to true. So, we should only reach here during a
174 // safepoint. If this assumption changes we might have to
175 // reconsider the use of _expand_heap_after_alloc_failure.
176 assert(SafepointSynchronize::is_at_safepoint(), "invariant");
177
178 log_debug(gc, ergo, heap)("Attempt heap expansion (region allocation request failed). Allocation request: " SIZE_FORMAT "B",
179 word_size * HeapWordSize);
180
181 if (expand(word_size * HeapWordSize)) {
182 // Given that expand() succeeded in expanding the heap, and we
183 // always expand the heap by an amount aligned to the heap
184 // region size, the free list should in theory not be empty.
185 // In either case allocate_free_region() will check for NULL.
186 res = _hrm->allocate_free_region(is_old);
187 } else {
188 _expand_heap_after_alloc_failure = false;
189 }
190 }
191 return res;
192 }
193
194 HeapWord*
195 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
196 uint num_regions,
197 size_t word_size) {
198 assert(first != G1_NO_HRM_INDEX, "pre-condition");
199 assert(is_humongous(word_size), "word_size should be humongous");
200 assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
201
202 // Index of last region in the series.
203 uint last = first + num_regions - 1;
204
205 // We need to initialize the region(s) we just discovered. This is
206 // a bit tricky given that it can happen concurrently with
320 // Otherwise, if using ex regions might help, try with ex given back.
321 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
322 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
323
324 _verifier->verify_region_sets_optional();
325
326 uint first = G1_NO_HRM_INDEX;
327 uint obj_regions = (uint) humongous_obj_size_in_regions(word_size);
328
329 if (obj_regions == 1) {
330 // Only one region to allocate, try to use a fast path by directly allocating
331 // from the free lists. Do not try to expand here, we will potentially do that
332 // later.
333 HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
334 if (hr != NULL) {
335 first = hr->hrm_index();
336 }
337 } else {
338 // Policy: Try only empty regions (i.e. already committed) first. Maybe we
339 // are lucky enough to find some.
340 first = _hrm->find_contiguous_only_empty(obj_regions);
341 if (first != G1_NO_HRM_INDEX) {
342 _hrm->allocate_free_regions_starting_at(first, obj_regions);
343 }
344 }
345
346 if (first == G1_NO_HRM_INDEX) {
347 // Policy: We could not find enough regions for the humongous object in the
348 // free list. Look through the heap to find a mix of free and uncommitted regions.
349 // If so, try expansion.
350 first = _hrm->find_contiguous_empty_or_unavailable(obj_regions);
351 if (first != G1_NO_HRM_INDEX) {
352 // We found something. Make sure these regions are committed, i.e. expand
353 // the heap. Alternatively we could do a defragmentation GC.
354 log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B",
355 word_size * HeapWordSize);
356
357 _hrm->expand_at(first, obj_regions, workers());
358 g1_policy()->record_new_heap_size(num_regions());
359
360 #ifdef ASSERT
361 for (uint i = first; i < first + obj_regions; ++i) {
362 HeapRegion* hr = region_at(i);
363 assert(hr->is_free(), "sanity");
364 assert(hr->is_empty(), "sanity");
365 assert(is_on_master_free_list(hr), "sanity");
366 }
367 #endif
368 _hrm->allocate_free_regions_starting_at(first, obj_regions);
369 } else {
370 // Policy: Potentially trigger a defragmentation GC.
371 }
372 }
373
374 HeapWord* result = NULL;
375 if (first != G1_NO_HRM_INDEX) {
376 result = humongous_obj_allocate_initialize_regions(first, obj_regions, word_size);
377 assert(result != NULL, "it should always return a valid result");
378
379 // A successful humongous object allocation changes the used space
380 // information of the old generation so we need to recalculate the
381 // sizes and update the jstat counters here.
382 g1mm()->update_sizes();
383 }
384
385 _verifier->verify_region_sets_optional();
386
387 return result;
388 }
537 return NULL;
538 }
539 return _archive_allocator->archive_mem_allocate(word_size);
540 }
541
542 void G1CollectedHeap::end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
543 size_t end_alignment_in_bytes) {
544 assert_at_safepoint_on_vm_thread();
545 assert(_archive_allocator != NULL, "_archive_allocator not initialized");
546
547 // Call complete_archive to do the real work, filling in the MemRegion
548 // array with the archive regions.
549 _archive_allocator->complete_archive(ranges, end_alignment_in_bytes);
550 delete _archive_allocator;
551 _archive_allocator = NULL;
552 }
553
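// Returns true only if every MemRegion in 'ranges' lies entirely within the
// reserved heap, i.e. both its first and its last word are covered by the reservation.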
554 bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, size_t count) {
555 assert(ranges != NULL, "MemRegion array NULL");
556 assert(count != 0, "No MemRegions provided");
557 MemRegion reserved = _hrm->reserved();
558 for (size_t i = 0; i < count; i++) {
559 if (!reserved.contains(ranges[i].start()) || !reserved.contains(ranges[i].last())) {
560 return false;
561 }
562 }
563 return true;
564 }
565
566 bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges,
567 size_t count,
568 bool open) {
569 assert(!is_init_completed(), "Expect to be called at JVM init time");
570 assert(ranges != NULL, "MemRegion array NULL");
571 assert(count != 0, "No MemRegions provided");
572 MutexLockerEx x(Heap_lock);
573
574 MemRegion reserved = _hrm->reserved();
575 HeapWord* prev_last_addr = NULL;
576 HeapRegion* prev_last_region = NULL;
577
578 // Temporarily disable pretouching of heap pages. This interface is used
579 // when mmap'ing archived heap data in, so pre-touching is wasted.
580 FlagSetting fs(AlwaysPreTouch, false);
581
582 // Enable archive object checking used by G1MarkSweep. We have to let it know
583 // about each archive range, so that objects in those ranges aren't marked.
584 G1ArchiveAllocator::enable_archive_object_check();
585
586 // For each specified MemRegion range, allocate the corresponding G1
587 // regions and mark them as archive regions. We expect the ranges
588 // in ascending starting address order, without overlap.
589 for (size_t i = 0; i < count; i++) {
590 MemRegion curr_range = ranges[i];
591 HeapWord* start_address = curr_range.start();
592 size_t word_size = curr_range.word_size();
593 HeapWord* last_address = curr_range.last();
594 size_t commits = 0;
595
596 guarantee(reserved.contains(start_address) && reserved.contains(last_address),
597 "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
598 p2i(start_address), p2i(last_address));
599 guarantee(start_address > prev_last_addr,
600 "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
601 p2i(start_address), p2i(prev_last_addr));
602 prev_last_addr = last_address;
603
604 // Check for ranges that start in the same G1 region in which the previous
605 // range ended, and adjust the start address so we don't try to allocate
606 // the same region again. If the current range is entirely within that
607 // region, skip it, just adjusting the recorded top.
608 HeapRegion* start_region = _hrm->addr_to_region(start_address);
609 if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
610 start_address = start_region->end();
611 if (start_address > last_address) {
612 increase_used(word_size * HeapWordSize);
613 start_region->set_top(last_address + 1);
614 continue;
615 }
616 start_region->set_top(start_address);
617 curr_range = MemRegion(start_address, last_address + 1);
618 start_region = _hrm->addr_to_region(start_address);
619 }
620
621 // Perform the actual region allocation, exiting if it fails.
622 // Then note how much new space we have allocated.
623 if (!_hrm->allocate_containing_regions(curr_range, &commits, workers())) {
624 return false;
625 }
626 increase_used(word_size * HeapWordSize);
627 if (commits != 0) {
628 log_debug(gc, ergo, heap)("Attempt heap expansion (allocate archive regions). Total size: " SIZE_FORMAT "B",
629 HeapRegion::GrainWords * HeapWordSize * commits);
630
631 }
632
633 // Mark each G1 region touched by the range as archive, add it to
634 // the old set, and set top.
635 HeapRegion* curr_region = _hrm->addr_to_region(start_address);
636 HeapRegion* last_region = _hrm->addr_to_region(last_address);
637 prev_last_region = last_region;
638
639 while (curr_region != NULL) {
640 assert(curr_region->is_empty() && !curr_region->is_pinned(),
641 "Region already in use (index %u)", curr_region->hrm_index());
642 if (open) {
643 curr_region->set_open_archive();
644 } else {
645 curr_region->set_closed_archive();
646 }
647 _hr_printer.alloc(curr_region);
648 _archive_set.add(curr_region);
649 HeapWord* top;
650 HeapRegion* next_region;
651 if (curr_region != last_region) {
652 top = curr_region->end();
653 next_region = _hrm->next_region_in_heap(curr_region);
654 } else {
655 top = last_address + 1;
656 next_region = NULL;
657 }
658 curr_region->set_top(top);
659 curr_region->set_first_dead(top);
660 curr_region->set_end_of_live(top);
661 curr_region = next_region;
662 }
663
664 // Notify mark-sweep of the archive
665 G1ArchiveAllocator::set_range_archive(curr_range, open);
666 }
667 return true;
668 }
669
670 void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
671 assert(!is_init_completed(), "Expect to be called at JVM init time");
672 assert(ranges != NULL, "MemRegion array NULL");
673 assert(count != 0, "No MemRegions provided");
674 MemRegion reserved = _hrm->reserved();
675 HeapWord *prev_last_addr = NULL;
676 HeapRegion* prev_last_region = NULL;
677
678 // For each MemRegion, create filler objects, if needed, in the G1 regions
679 // that contain the address range. The address range actually within the
680 // MemRegion will not be modified. That is assumed to have been initialized
681 // elsewhere, probably via an mmap of archived heap data.
682 MutexLockerEx x(Heap_lock);
683 for (size_t i = 0; i < count; i++) {
684 HeapWord* start_address = ranges[i].start();
685 HeapWord* last_address = ranges[i].last();
686
687 assert(reserved.contains(start_address) && reserved.contains(last_address),
688 "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
689 p2i(start_address), p2i(last_address));
690 assert(start_address > prev_last_addr,
691 "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
692 p2i(start_address), p2i(prev_last_addr));
693
694 HeapRegion* start_region = _hrm->addr_to_region(start_address);
695 HeapRegion* last_region = _hrm->addr_to_region(last_address);
696 HeapWord* bottom_address = start_region->bottom();
697
698 // Check for a range beginning in the same region in which the
699 // previous one ended.
700 if (start_region == prev_last_region) {
701 bottom_address = prev_last_addr + 1;
702 }
703
704 // Verify that the regions were all marked as archive regions by
705 // alloc_archive_regions.
706 HeapRegion* curr_region = start_region;
707 while (curr_region != NULL) {
708 guarantee(curr_region->is_archive(),
709 "Expected archive region at index %u", curr_region->hrm_index());
710 if (curr_region != last_region) {
711 curr_region = _hrm->next_region_in_heap(curr_region);
712 } else {
713 curr_region = NULL;
714 }
715 }
716
717 prev_last_addr = last_address;
718 prev_last_region = last_region;
719
720 // Fill the memory below the allocated range with dummy object(s),
721 // if the region bottom does not match the range start, or if the previous
722 // range ended within the same G1 region, and there is a gap.
723 if (start_address != bottom_address) {
724 size_t fill_size = pointer_delta(start_address, bottom_address);
725 G1CollectedHeap::fill_with_objects(bottom_address, fill_size);
726 increase_used(fill_size * HeapWordSize);
727 }
728 }
729 }
730
731 inline HeapWord* G1CollectedHeap::attempt_allocation(size_t min_word_size,
740 if (result == NULL) {
741 *actual_word_size = desired_word_size;
742 result = attempt_allocation_slow(desired_word_size);
743 }
744
745 assert_heap_not_locked();
746 if (result != NULL) {
747 assert(*actual_word_size != 0, "Actual size must have been set here");
748 dirty_young_block(result, *actual_word_size);
749 } else {
750 *actual_word_size = 0;
751 }
752
753 return result;
754 }
755
756 void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
757 assert(!is_init_completed(), "Expect to be called at JVM init time");
758 assert(ranges != NULL, "MemRegion array NULL");
759 assert(count != 0, "No MemRegions provided");
760 MemRegion reserved = _hrm->reserved();
761 HeapWord* prev_last_addr = NULL;
762 HeapRegion* prev_last_region = NULL;
763 size_t size_used = 0;
764 size_t uncommitted_regions = 0;
765
766 // For each MemRegion, free the G1 regions that constitute it, and
767 // notify mark-sweep that the range is no longer to be considered 'archive.'
768 MutexLockerEx x(Heap_lock);
769 for (size_t i = 0; i < count; i++) {
770 HeapWord* start_address = ranges[i].start();
771 HeapWord* last_address = ranges[i].last();
772
773 assert(reserved.contains(start_address) && reserved.contains(last_address),
774 "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
775 p2i(start_address), p2i(last_address));
776 assert(start_address > prev_last_addr,
777 "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
778 p2i(start_address), p2i(prev_last_addr));
779 size_used += ranges[i].byte_size();
780 prev_last_addr = last_address;
781
782 HeapRegion* start_region = _hrm->addr_to_region(start_address);
783 HeapRegion* last_region = _hrm->addr_to_region(last_address);
784
785 // Check for ranges that start in the same G1 region in which the previous
786 // range ended, and adjust the start address so we don't try to free
787 // the same region again. If the current range is entirely within that
788 // region, skip it.
789 if (start_region == prev_last_region) {
790 start_address = start_region->end();
791 if (start_address > last_address) {
792 continue;
793 }
794 start_region = _hrm->addr_to_region(start_address);
795 }
796 prev_last_region = last_region;
797
798 // After verifying that each region was marked as an archive region by
799 // alloc_archive_regions, set it free and empty and uncommit it.
800 HeapRegion* curr_region = start_region;
801 while (curr_region != NULL) {
802 guarantee(curr_region->is_archive(),
803 "Expected archive region at index %u", curr_region->hrm_index());
804 uint curr_index = curr_region->hrm_index();
805 _archive_set.remove(curr_region);
806 curr_region->set_free();
807 curr_region->set_top(curr_region->bottom());
808 if (curr_region != last_region) {
809 curr_region = _hrm->next_region_in_heap(curr_region);
810 } else {
811 curr_region = NULL;
812 }
813 _hrm->shrink_at(curr_index, 1);
814 uncommitted_regions++;
815 }
816
817 // Notify mark-sweep that this is no longer an archive range.
818 G1ArchiveAllocator::set_range_archive(ranges[i], false);
819 }
820
821 if (uncommitted_regions != 0) {
822 log_debug(gc, ergo, heap)("Attempt heap shrinking (uncommitted archive regions). Total size: " SIZE_FORMAT "B",
823 HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
824 }
825 decrease_used(size_used);
826 }
827
828 oop G1CollectedHeap::materialize_archived_object(oop obj) {
829 assert(obj != NULL, "archived obj is NULL");
830 assert(G1ArchiveAllocator::is_archived_object(obj), "must be archived object");
831
832 // Loading an archived object makes it strongly reachable. If it is
833 // loaded during concurrent marking, it must be enqueued to the SATB
1008 _ref_processor_cm->verify_no_references_recorded();
1009
1010 // Abandon current iterations of concurrent marking and concurrent
1011 // refinement, if any are in progress.
1012 concurrent_mark()->concurrent_cycle_abort();
1013 }
1014
1015 void G1CollectedHeap::prepare_heap_for_full_collection() {
1016 // Make sure we'll choose a new allocation region afterwards.
1017 _allocator->release_mutator_alloc_region();
1018 _allocator->abandon_gc_alloc_regions();
1019 g1_rem_set()->cleanupHRRS();
1020
1021 // We may have added regions to the current incremental collection
1022 // set between the last GC or pause and now. We need to clear the
1023 // incremental collection set and then start rebuilding it afresh
1024 // after this full GC.
1025 abandon_collection_set(collection_set());
1026
1027 tear_down_region_sets(false /* free_list_only */);
1028
1029 hrm()->prepare_for_full_collection_start();
1030 }
1031
1032 void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) {
1033 assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
1034 assert(used() == recalculate_used(), "Should be equal");
1035 _verifier->verify_region_sets_optional();
1036 _verifier->verify_before_gc(G1HeapVerifier::G1VerifyFull);
1037 _verifier->check_bitmaps("Full GC Start");
1038 }
1039
1040 void G1CollectedHeap::prepare_heap_for_mutators() {
1041 hrm()->prepare_for_full_collection_end();
1042
1043 // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1044 ClassLoaderDataGraph::purge();
1045 MetaspaceUtils::verify_metrics();
1046
1047 // Prepare heap for normal collections.
1048 assert(num_free_regions() == 0, "we should not have added any free regions");
1049 rebuild_region_sets(false /* free_list_only */);
1050 abort_refinement();
1051 resize_heap_if_necessary();
1052
1053 // Rebuild the strong code root lists for each region
1054 rebuild_strong_code_roots();
1055
1056 // Purge code root memory
1057 purge_code_root_memory();
1058
1059 // Start a new incremental collection set for the next pause
1060 start_new_collection_set();
1061
1062 _allocator->init_mutator_alloc_region();
1063
1064 // Post collection state updates.
1065 MetaspaceGC::compute_new_size();
1066 }
1067
1068 void G1CollectedHeap::abort_refinement() {
1069 if (_hot_card_cache->use_cache()) {
1070 _hot_card_cache->reset_hot_cache();
1071 }
1072
1073 // Discard all remembered set updates.
1074 G1BarrierSet::dirty_card_queue_set().abandon_logs();
1075 assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
1076 }
1077
1078 void G1CollectedHeap::verify_after_full_collection() {
1079 _hrm->verify_optional();
1080 _verifier->verify_region_sets_optional();
1081 _verifier->verify_after_gc(G1HeapVerifier::G1VerifyFull);
1082 // Clear the previous marking bitmap, if needed for bitmap verification.
1083 // Note we cannot do this when we clear the next marking bitmap in
1084 // G1ConcurrentMark::abort() above since VerifyDuringGC verifies the
1085 // objects marked during a full GC against the previous bitmap.
1086 // But we need to clear it before calling check_bitmaps below since
1087 // the full GC has compacted objects and updated TAMS but not updated
1088 // the prev bitmap.
1089 if (G1VerifyBitmaps) {
1090 GCTraceTime(Debug, gc)("Clear Prev Bitmap for Verification");
1091 _cm->clear_prev_bitmap(workers());
1092 }
1093 // This call implicitly verifies that the next bitmap is clear after Full GC.
1094 _verifier->check_bitmaps("Full GC End");
1095
1096 // At this point there should be no regions in the
1097 // entire heap tagged as young.
1098 assert(check_young_list_empty(), "young list should be empty at this point");
1099
1311 // appropriate.
1312 return NULL;
1313 }
1314
1315 // Attempt to expand the heap sufficiently
1316 // to support an allocation of the given "word_size". If
1317 // successful, perform the allocation and return the address of the
1318 // allocated block, or else return "NULL".
1319
1320 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
1321 assert_at_safepoint_on_vm_thread();
1322
1323 _verifier->verify_region_sets_optional();
1324
1325 size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1326 log_debug(gc, ergo, heap)("Attempt heap expansion (allocation request failed). Allocation request: " SIZE_FORMAT "B",
1327 word_size * HeapWordSize);
1328
1329
1330 if (expand(expand_bytes, _workers)) {
1331 _hrm->verify_optional();
1332 _verifier->verify_region_sets_optional();
1333 return attempt_allocation_at_safepoint(word_size,
1334 false /* expect_null_mutator_alloc_region */);
1335 }
1336 return NULL;
1337 }
1338
1339 bool G1CollectedHeap::expand(size_t expand_bytes, WorkGang* pretouch_workers, double* expand_time_ms) {
1340 size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1341 aligned_expand_bytes = align_up(aligned_expand_bytes,
1342 HeapRegion::GrainBytes);
1343
1344 log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: " SIZE_FORMAT "B expansion amount: " SIZE_FORMAT "B",
1345 expand_bytes, aligned_expand_bytes);
1346
1347 if (is_maximal_no_gc()) {
1348 log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
1349 return false;
1350 }
1351
1352 double expand_heap_start_time_sec = os::elapsedTime();
1353 uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
1354 assert(regions_to_expand > 0, "Must expand by at least one region");
1355
1356 uint expanded_by = _hrm->expand_by(regions_to_expand, pretouch_workers);
1357 if (expand_time_ms != NULL) {
1358 *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
1359 }
1360
1361 if (expanded_by > 0) {
1362 size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
1363 assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1364 g1_policy()->record_new_heap_size(num_regions());
1365 } else {
1366 log_debug(gc, ergo, heap)("Did not expand the heap (heap expansion operation failed)");
1367
1368 // The expansion of the virtual storage space was unsuccessful.
1369 // Let's see if it was because we ran out of swap.
1370 if (G1ExitOnExpansionFailure &&
1371 _hrm->available() >= regions_to_expand) {
1372 // We had head room...
1373 vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
1374 }
1375 }
1376 return regions_to_expand > 0;
1377 }
1378
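// Uncommit up to the requested amount of space from the region manager. The
// amount actually removed may be smaller if fewer regions can be shrunk away;
// the new heap size is only recorded when at least one region was removed.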
1379 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1380 size_t aligned_shrink_bytes =
1381 ReservedSpace::page_align_size_down(shrink_bytes);
1382 aligned_shrink_bytes = align_down(aligned_shrink_bytes,
1383 HeapRegion::GrainBytes);
1384 uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
1385
1386 uint num_regions_removed = _hrm->shrink_by(num_regions_to_remove);
1387 size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
1388
1389
1390 log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",
1391 shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
1392 if (num_regions_removed > 0) {
1393 g1_policy()->record_new_heap_size(num_regions());
1394 } else {
1395 log_debug(gc, ergo, heap)("Did not expand the heap (heap shrinking operation failed)");
1396 }
1397 }
1398
1399 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1400 _verifier->verify_region_sets_optional();
1401
1402 // We should only reach here at the end of a Full GC or during Remark which
1403 // means we should not be holding on to any GC alloc regions. The method
1404 // below will make sure of that and do any remaining clean up.
1405 _allocator->abandon_gc_alloc_regions();
1406
1407 // Instead of tearing down / rebuilding the free lists here, we
1408 // could instead use the remove_all_pending() method on free_list to
1409 // remove only the ones that we need to remove.
1410 tear_down_region_sets(true /* free_list_only */);
1411 shrink_helper(shrink_bytes);
1412 rebuild_region_sets(true /* free_list_only */);
1413
1414 _hrm->verify_optional();
1415 _verifier->verify_region_sets_optional();
1416 }
1417
1418 class OldRegionSetChecker : public HeapRegionSetChecker {
1419 public:
1420 void check_mt_safety() {
1421 // Master Old Set MT safety protocol:
1422 // (a) If we're at a safepoint, operations on the master old set
1423 // should be invoked:
1424 // - by the VM thread (which will serialize them), or
1425 // - by the GC workers while holding the FreeList_lock, if we're
1426 // at a safepoint for an evacuation pause (this lock is taken
1427 // anyway when a GC alloc region is retired so that a new one
1428 // is allocated from the free list), or
1429 // - by the GC workers while holding the OldSets_lock, if we're at a
1430 // safepoint for a cleanup pause.
1431 // (b) If we're not at a safepoint, operations on the master old set
1432 // should be invoked while holding the Heap_lock.
1433
1434 if (SafepointSynchronize::is_at_safepoint()) {
1472 guarantee(Heap_lock->owned_by_self(),
1473 "master humongous set MT safety protocol outside a safepoint");
1474 }
1475 }
1476 bool is_correct_type(HeapRegion* hr) { return hr->is_humongous(); }
1477 const char* get_description() { return "Humongous Regions"; }
1478 };
1479
1480 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
1481 CollectedHeap(),
1482 _young_gen_sampling_thread(NULL),
1483 _workers(NULL),
1484 _collector_policy(collector_policy),
1485 _card_table(NULL),
1486 _soft_ref_policy(),
1487 _old_set("Old Region Set", new OldRegionSetChecker()),
1488 _archive_set("Archive Region Set", new ArchiveRegionSetChecker()),
1489 _humongous_set("Humongous Region Set", new HumongousRegionSetChecker()),
1490 _bot(NULL),
1491 _listener(),
1492 _hrm(NULL),
1493 _is_hetero_heap(AllocateOldGenAt != NULL),
1494 _allocator(NULL),
1495 _verifier(NULL),
1496 _summary_bytes_used(0),
1497 _archive_allocator(NULL),
1498 _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
1499 _old_evac_stats("Old", OldPLABSize, PLABWeight),
1500 _expand_heap_after_alloc_failure(true),
1501 _g1mm(NULL),
1502 _humongous_reclaim_candidates(),
1503 _has_humongous_reclaim_candidates(false),
1504 _hr_printer(),
1505 _collector_state(),
1506 _old_marking_cycles_started(0),
1507 _old_marking_cycles_completed(0),
1508 _eden(),
1509 _survivor(),
1510 _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1511 _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1512 _g1_policy(new G1Policy(_gc_timer_stw)),
1513 _heap_sizing_policy(NULL),
1606 return JNI_ENOMEM;
1607 }
1608 return JNI_OK;
1609 }
1610
1611 jint G1CollectedHeap::initialize() {
1612 os::enable_vtime();
1613
1614 // Necessary to satisfy locking discipline assertions.
1615
1616 MutexLocker x(Heap_lock);
1617
1618 // While there are no constraints in the GC code that HeapWordSize
1619 // be any particular value, there are multiple other areas in the
1620 // system which believe this to be true (e.g. oop->object_size in some
1621 // cases incorrectly returns the size in wordSize units rather than
1622 // HeapWordSize).
1623 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1624
1625 size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1626 size_t max_byte_size = g1_collector_policy()->heap_reservation_size_bytes();
1627 size_t heap_alignment = collector_policy()->heap_alignment();
1628
1629 // Ensure that the sizes are properly aligned.
1630 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1631 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1632 Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
1633
1634 // Reserve the maximum.
1635
1636 // When compressed oops are enabled, the preferred heap base
1637 // is calculated by subtracting the requested size from the
1638 // 32Gb boundary and using the result as the base address for
1639 // heap reservation. If the requested size is not aligned to
1640 // HeapRegion::GrainBytes (i.e. the alignment that is passed
1641 // into the ReservedHeapSpace constructor) then the actual
1642 // base of the reserved heap may end up differing from the
1643 // address that was requested (i.e. the preferred heap base).
1644 // If this happens then we could end up using a non-optimal
1645 // compressed oops mode.
1646
1670 G1BarrierSet::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
1671 &bs->dirty_card_queue_buffer_allocator(),
1672 -1, // temp. never trigger
1673 -1, // temp. no limit
1674 Shared_DirtyCardQ_lock,
1675 true); // init_free_ids
1676
1677 dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
1678 &bs->dirty_card_queue_buffer_allocator(),
1679 -1, // never trigger processing
1680 -1, // no limit on length
1681 Shared_DirtyCardQ_lock);
1682
1683 // Create the hot card cache.
1684 _hot_card_cache = new G1HotCardCache(this);
1685
1686 // Carve out the G1 part of the heap.
1687 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
1688 size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
1689 G1RegionToSpaceMapper* heap_storage =
1690 G1RegionToSpaceMapper::create_heap_mapper(g1_rs,
1691 g1_rs.size(),
1692 page_size,
1693 HeapRegion::GrainBytes,
1694 1,
1695 mtJavaHeap);
1696 if (heap_storage == NULL) {
1697 vm_shutdown_during_initialization("Could not initialize G1 heap");
1698 return JNI_ERR;
1699 }
1700
1701 os::trace_page_sizes("Heap",
1702 collector_policy()->min_heap_byte_size(),
1703 max_byte_size,
1704 page_size,
1705 heap_rs.base(),
1706 heap_rs.size());
1707 heap_storage->set_mapping_changed_listener(&_listener);
1708
1709 // Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps.
1710 G1RegionToSpaceMapper* bot_storage =
1711 create_aux_memory_mapper("Block Offset Table",
1712 G1BlockOffsetTable::compute_size(g1_rs.size() / HeapWordSize),
1713 G1BlockOffsetTable::heap_map_factor());
1714
1715 G1RegionToSpaceMapper* cardtable_storage =
1716 create_aux_memory_mapper("Card Table",
1717 G1CardTable::compute_size(g1_rs.size() / HeapWordSize),
1718 G1CardTable::heap_map_factor());
1719
1720 G1RegionToSpaceMapper* card_counts_storage =
1721 create_aux_memory_mapper("Card Counts Table",
1722 G1CardCounts::compute_size(g1_rs.size() / HeapWordSize),
1723 G1CardCounts::heap_map_factor());
1724
1725 size_t bitmap_size = G1CMBitMap::compute_size(g1_rs.size());
1726 G1RegionToSpaceMapper* prev_bitmap_storage =
1727 create_aux_memory_mapper("Prev Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
1728 G1RegionToSpaceMapper* next_bitmap_storage =
1729 create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
1730
1731 _hrm = HeapRegionManager::create_manager(this, collector_policy());
1732
1733 _hrm->initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
1734 _card_table->initialize(cardtable_storage);
1735 // Do later initialization work for concurrent refinement.
1736 _hot_card_cache->initialize(card_counts_storage);
1737
1738 // 6843694 - ensure that the maximum region index can fit
1739 // in the remembered set structures.
1740 const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
1741 guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
1742
1743 // The G1FromCardCache reserves card with value 0 as "invalid", so the heap must not
1744 // start within the first card.
1745 guarantee(g1_rs.base() >= (char*)G1CardTable::card_size, "Java heap must not start within the first card.");
1746 // Also create a G1 rem set.
1747 _g1_rem_set = new G1RemSet(this, _card_table, _hot_card_cache);
1748 _g1_rem_set->initialize(max_reserved_capacity(), max_regions());
1749
1750 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
1751 guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
1752 guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
1753 "too many cards per region");
1754
1755 FreeRegionList::set_unrealistically_long_length(max_expandable_regions() + 1);
1756
1757 _bot = new G1BlockOffsetTable(reserved_region(), bot_storage);
1758
1759 {
1760 HeapWord* start = _hrm->reserved().start();
1761 HeapWord* end = _hrm->reserved().end();
1762 size_t granularity = HeapRegion::GrainBytes;
1763
1764 _in_cset_fast_test.initialize(start, end, granularity);
1765 _humongous_reclaim_candidates.initialize(start, end, granularity);
1766 }
1767
1768 // Create the G1ConcurrentMark data structure and thread.
1769 // (Must do this late, so that "max_regions" is defined.)
1770 _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
1771 if (_cm == NULL || !_cm->completed_initialization()) {
1772 vm_shutdown_during_initialization("Could not create/initialize G1ConcurrentMark");
1773 return JNI_ENOMEM;
1774 }
1775 _cm_thread = _cm->cm_thread();
1776
1777 // Now expand into the initial heap size.
1778 if (!expand(init_byte_size, _workers)) {
1779 vm_shutdown_during_initialization("Failed to allocate initial heap.");
1780 return JNI_ENOMEM;
1781 }
1782
1783 // Perform any initialization actions delegated to the policy.
1784 g1_policy()->init(this, &_collection_set);
1785 // Now we know the target length of the young list, so adjust the heap to provision that many regions on DRAM.
1786 if (is_hetero_heap()) {
1787 static_cast<HeterogeneousHeapRegionManager*>(hrm())->adjust_dram_regions((uint)g1_policy()->young_list_target_length(), workers());
1788 }
1789
1790 jint ecode = initialize_concurrent_refinement();
1791 if (ecode != JNI_OK) {
1792 return ecode;
1793 }
1794
1795 ecode = initialize_young_gen_sampling_thread();
1796 if (ecode != JNI_OK) {
1797 return ecode;
1798 }
1799
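// Couple the dirty card queue set thresholds to the concurrent refinement
// zones: completed buffers start being processed at the yellow zone and the
// completed-buffer queue is capped at the red zone.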
1800 {
1801 DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
1802 dcqs.set_process_completed_threshold((int)concurrent_refine()->yellow_zone());
1803 dcqs.set_max_completed_queue((int)concurrent_refine()->red_zone());
1804 }
1805
1806 // Here we allocate the dummy HeapRegion that is required by the
1807 // G1AllocRegion class.
1808 HeapRegion* dummy_region = _hrm->get_dummy_region();
1809
1810 // We'll re-use the same region whether the alloc region will
1811 // require BOT updates or not and, if it doesn't, then a non-young
1812 // region will complain that it cannot support allocations without
1813 // BOT updates. So we'll tag the dummy region as eden to avoid that.
1814 dummy_region->set_eden();
1815 // Make sure it's full.
1816 dummy_region->set_top(dummy_region->end());
1817 G1AllocRegion::setup(this, dummy_region);
1818
1819 _allocator->init_mutator_alloc_region();
1820
1821 // Create the monitoring and management support so that
1822 // values in the heap have been properly initialized.
1823 _g1mm = new G1MonitoringSupport(this);
1824
1825 G1StringDedup::initialize();
1826
1827 _preserved_marks_set.init(ParallelGCThreads);
1828
1908 false, // Reference discovery is not atomic
1909 &_is_alive_closure_cm, // is alive closure
1910 true); // allow changes to number of processing threads
1911
1912 // STW ref processor
1913 _ref_processor_stw =
1914 new ReferenceProcessor(&_is_subject_to_discovery_stw,
1915 mt_processing, // mt processing
1916 ParallelGCThreads, // degree of mt processing
1917 (ParallelGCThreads > 1), // mt discovery
1918 ParallelGCThreads, // degree of mt discovery
1919 true, // Reference discovery is atomic
1920 &_is_alive_closure_stw, // is alive closure
1921 true); // allow changes to number of processing threads
1922 }
1923
1924 CollectorPolicy* G1CollectedHeap::collector_policy() const {
1925 return _collector_policy;
1926 }
1927
1928 G1CollectorPolicy* G1CollectedHeap::g1_collector_policy() const {
1929 return _collector_policy;
1930 }
1931
1932 SoftRefPolicy* G1CollectedHeap::soft_ref_policy() {
1933 return &_soft_ref_policy;
1934 }
1935
1936 size_t G1CollectedHeap::capacity() const {
1937 return _hrm->length() * HeapRegion::GrainBytes;
1938 }
1939
1940 size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
1941 return _hrm->total_free_bytes();
1942 }
1943
1944 void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
1945 _hot_card_cache->drain(cl, worker_i);
1946 }
1947
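// Drain the completed buffers of the global dirty card queue set during GC,
// applying 'cl' to the cards of each buffer, and record the number of buffers
// processed as an UpdateRS work item for 'worker_i'.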
1948 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl, uint worker_i) {
1949 DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
1950 size_t n_completed_buffers = 0;
1951 while (dcqs.apply_closure_during_gc(cl, worker_i)) {
1952 n_completed_buffers++;
1953 }
1954 g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers, G1GCPhaseTimes::UpdateRSProcessedBuffers);
1955 dcqs.clear_n_completed_buffers();
1956 assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
1957 }
1958
1959 // Computes the sum of the storage used by the various regions.
1960 size_t G1CollectedHeap::used() const {
1961 size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
2136 DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
2137
2138 // Schedule a standard evacuation pause. We're setting word_size
2139 // to 0 which means that we are not requesting a post-GC allocation.
2140 VM_G1CollectForAllocation op(0, /* word_size */
2141 gc_count_before,
2142 cause,
2143 false, /* should_initiate_conc_mark */
2144 g1_policy()->max_pause_time_ms());
2145 VMThread::execute(&op);
2146 } else {
2147 // Schedule a Full GC.
2148 VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
2149 VMThread::execute(&op);
2150 }
2151 }
2152 } while (retry_gc);
2153 }
2154
2155 bool G1CollectedHeap::is_in(const void* p) const {
2156 if (_hrm->reserved().contains(p)) {
2157 // Given that we know that p is in the reserved space,
2158 // heap_region_containing() should successfully
2159 // return the containing region.
2160 HeapRegion* hr = heap_region_containing(p);
2161 return hr->is_in(p);
2162 } else {
2163 return false;
2164 }
2165 }
2166
2167 #ifdef ASSERT
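// A stricter, debug-only variant of is_in(): besides lying within the reserved
// heap, the address must map to a region that is currently available
// (committed) in the heap region manager.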
2168 bool G1CollectedHeap::is_in_exact(const void* p) const {
2169 bool contains = reserved_region().contains(p);
2170 bool available = _hrm->is_available(addr_to_region((HeapWord*)p));
2171 if (contains && available) {
2172 return true;
2173 } else {
2174 return false;
2175 }
2176 }
2177 #endif
2178
2179 // Iteration functions.
2180
2181 // Iterates an ObjectClosure over all objects within a HeapRegion.
2182
2183 class IterateObjectClosureRegionClosure: public HeapRegionClosure {
2184 ObjectClosure* _cl;
2185 public:
2186 IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
2187 bool do_heap_region(HeapRegion* r) {
2188 if (!r->is_continues_humongous()) {
2189 r->object_iterate(_cl);
2190 }
2191 return false;
2192 }
2193 };
2194
2195 void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
2196 IterateObjectClosureRegionClosure blk(cl);
2197 heap_region_iterate(&blk);
2198 }
2199
2200 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
2201 _hrm->iterate(cl);
2202 }
2203
2204 void G1CollectedHeap::heap_region_par_iterate_from_worker_offset(HeapRegionClosure* cl,
2205 HeapRegionClaimer *hrclaimer,
2206 uint worker_id) const {
2207 _hrm->par_iterate(cl, hrclaimer, hrclaimer->offset_for_worker(worker_id));
2208 }
2209
2210 void G1CollectedHeap::heap_region_par_iterate_from_start(HeapRegionClosure* cl,
2211 HeapRegionClaimer *hrclaimer) const {
2212 _hrm->par_iterate(cl, hrclaimer, 0);
2213 }
2214
2215 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
2216 _collection_set.iterate(cl);
2217 }
2218
2219 void G1CollectedHeap::collection_set_iterate_from(HeapRegionClosure *cl, uint worker_id) {
2220 _collection_set.iterate_from(cl, worker_id, workers()->active_workers());
2221 }
2222
2223 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
2224 HeapRegion* hr = heap_region_containing(addr);
2225 return hr->block_start(addr);
2226 }
2227
2228 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
2229 HeapRegion* hr = heap_region_containing(addr);
2230 return hr->block_size(addr);
2231 }
2232
2241
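// The mutator TLAB capacity is the space still expected to be used for eden,
// i.e. the young list target length minus the regions already taken by survivors.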
2242 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
2243 return (_g1_policy->young_list_target_length() - _survivor.length()) * HeapRegion::GrainBytes;
2244 }
2245
2246 size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
2247 return _eden.length() * HeapRegion::GrainBytes;
2248 }
2249
2250 // For G1, TLABs should not contain humongous objects, so the maximum TLAB size
2251 // must be equal to the humongous object limit.
2252 size_t G1CollectedHeap::max_tlab_size() const {
2253 return align_down(_humongous_object_threshold_in_words, MinObjAlignment);
2254 }
2255
2256 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2257 return _allocator->unsafe_max_tlab_alloc();
2258 }
2259
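// max_capacity() is bounded by how far the heap can currently be expanded
// (the expandable length), while max_reserved_capacity() below is based on
// the full reserved range of regions.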
2260 size_t G1CollectedHeap::max_capacity() const {
2261 return _hrm->max_expandable_length() * HeapRegion::GrainBytes;
2262 }
2263
2264 size_t G1CollectedHeap::max_reserved_capacity() const {
2265 return _hrm->max_length() * HeapRegion::GrainBytes;
2266 }
2267
2268 jlong G1CollectedHeap::millis_since_last_gc() {
2269 // See the notes in GenCollectedHeap::millis_since_last_gc()
2270 // for more information about the implementation.
2271 jlong ret_val = (os::javaTimeNanos() / NANOSECS_PER_MILLISEC) -
2272 _g1_policy->collection_pause_end_millis();
2273 if (ret_val < 0) {
2274 log_warning(gc)("millis_since_last_gc() would return : " JLONG_FORMAT
2275 ". returning zero instead.", ret_val);
2276 return 0;
2277 }
2278 return ret_val;
2279 }
2280
2281 void G1CollectedHeap::deduplicate_string(oop str) {
2282 assert(java_lang_String::is_instance(str), "invariant");
2283
2284 if (G1StringDedup::is_enabled()) {
2285 G1StringDedup::deduplicate(str);
2335 case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
2336 case VerifyOption_G1UseFullMarking: return is_obj_dead_full(obj);
2337 default: ShouldNotReachHere();
2338 }
2339 return false; // keep some compilers happy
2340 }
2341
2342 void G1CollectedHeap::print_heap_regions() const {
2343 LogTarget(Trace, gc, heap, region) lt;
2344 if (lt.is_enabled()) {
2345 LogStream ls(lt);
2346 print_regions_on(&ls);
2347 }
2348 }
2349
2350 void G1CollectedHeap::print_on(outputStream* st) const {
2351 st->print(" %-20s", "garbage-first heap");
2352 st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
2353 capacity()/K, used_unlocked()/K);
2354 st->print(" [" PTR_FORMAT ", " PTR_FORMAT ")",
2355 p2i(_hrm->reserved().start()),
2356 p2i(_hrm->reserved().end()));
2357 st->cr();
2358 st->print(" region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
2359 uint young_regions = young_regions_count();
2360 st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
2361 (size_t) young_regions * HeapRegion::GrainBytes / K);
2362 uint survivor_regions = survivor_regions_count();
2363 st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
2364 (size_t) survivor_regions * HeapRegion::GrainBytes / K);
2365 st->cr();
2366 MetaspaceUtils::print_on(st);
2367 }
2368
2369 void G1CollectedHeap::print_regions_on(outputStream* st) const {
2370 st->print_cr("Heap Regions: E=young(eden), S=young(survivor), O=old, "
2371 "HS=humongous(starts), HC=humongous(continues), "
2372 "CS=collection set, F=free, A=archive, "
2373 "TAMS=top-at-mark-start (previous, next)");
2374 PrintRegionClosure blk(st);
2375 heap_region_iterate(&blk);
2376 }
2511 // This summary needs to be printed before incrementing total collections.
2512 g1_rem_set()->print_periodic_summary_info("Before GC RS summary", total_collections());
2513
2514 // Update common counters.
2515 increment_total_collections(full /* full gc */);
2516 if (full) {
2517 increment_old_marking_cycles_started();
2518 }
2519
2520 // Fill TLAB's and such
2521 double start = os::elapsedTime();
2522 ensure_parsability(true);
2523 g1_policy()->phase_times()->record_prepare_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
2524 }
2525
2526 void G1CollectedHeap::gc_epilogue(bool full) {
2527 // Update common counters.
2528 if (full) {
2529 // Update the number of full collections that have been completed.
2530 increment_old_marking_cycles_completed(false /* concurrent */);
2531 // Now we know the target length of the young list, so adjust the heap to provision that many regions on DRAM.
2532 if (is_hetero_heap()) {
2533 static_cast<HeterogeneousHeapRegionManager*>(hrm())->adjust_dram_regions((uint)g1_policy()->young_list_target_length(), workers());
2534 }
2535 }
2536
2537 // We are at the end of the GC. The total collection count has already been incremented.
2538 g1_rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);
2539
2540 // FIXME: what is this about?
2541 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
2542 // is set.
2543 #if COMPILER2_OR_JVMCI
2544 assert(DerivedPointerTable::is_empty(), "derived pointer present");
2545 #endif
2546 // always_do_update_barrier = true;
2547
2548 double start = os::elapsedTime();
2549 resize_all_tlabs();
2550 g1_policy()->phase_times()->record_resize_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
2551
2552 MemoryService::track_memory_usage();
2553 // We have just completed a GC. Update the soft reference
2554 // policy with the new heap occupancy
3126 #ifdef TRACESPINNING
3127 ParallelTaskTerminator::print_termination_counts();
3128 #endif
3129
3130 gc_epilogue(false);
3131 }
3132
3133 // Print the remainder of the GC log output.
3134 if (evacuation_failed()) {
3135 log_info(gc)("To-space exhausted");
3136 }
3137
3138 g1_policy()->print_phases();
3139 heap_transition.print();
3140
3141 // It is not yet safe to tell the concurrent mark to
3142 // start as we have some optional output below. We don't want the
3143 // output from the concurrent mark thread interfering with this
3144 // logging output either.
3145
3146 _hrm->verify_optional();
3147 _verifier->verify_region_sets_optional();
3148
3149 TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
3150 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
3151
3152 print_heap_after_gc();
3153 print_heap_regions();
3154 trace_heap_after_gc(_gc_tracer_stw);
3155
3156 // We must call G1MonitoringSupport::update_sizes() in the same scoping level
3157 // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
3158 // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
3159 // before any GC notifications are raised.
3160 g1mm()->update_sizes();
3161
3162 _gc_tracer_stw->report_evacuation_info(&evacuation_info);
3163 _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
3164 _gc_timer_stw->register_gc_end();
3165 _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
3166 }
3803 DerivedPointerTable::update_pointers();
3804 g1_policy()->phase_times()->record_derived_pointer_table_update_time((os::elapsedTime() - start) * 1000.0);
3805 #endif
3806 g1_policy()->print_age_table();
3807 }
3808
3809 void G1CollectedHeap::record_obj_copy_mem_stats() {
3810 g1_policy()->add_bytes_allocated_in_old_since_last_gc(_old_evac_stats.allocated() * HeapWordSize);
3811
3812 _gc_tracer_stw->report_evacuation_statistics(create_g1_evac_summary(&_survivor_evac_stats),
3813 create_g1_evac_summary(&_old_evac_stats));
3814 }
3815
3816 void G1CollectedHeap::free_region(HeapRegion* hr,
3817 FreeRegionList* free_list,
3818 bool skip_remset,
3819 bool skip_hot_card_cache,
3820 bool locked) {
3821 assert(!hr->is_free(), "the region should not be free");
3822 assert(!hr->is_empty(), "the region should not be empty");
3823 assert(_hrm->is_available(hr->hrm_index()), "region should be committed");
3824 assert(free_list != NULL, "pre-condition");
3825
3826 if (G1VerifyBitmaps) {
3827 MemRegion mr(hr->bottom(), hr->end());
3828 concurrent_mark()->clear_range_in_prev_bitmap(mr);
3829 }
3830
3831 // Clear the card counts for this region.
3832 // Note: we only need to do this if the region is not young
3833 // (since we don't refine cards in young regions).
3834 if (!skip_hot_card_cache && !hr->is_young()) {
3835 _hot_card_cache->reset_card_counts(hr);
3836 }
3837 hr->hr_clear(skip_remset, true /* clear_space */, locked /* locked */);
3838 _g1_policy->remset_tracker()->update_at_free(hr);
3839 free_list->add_ordered(hr);
3840 }
3841
3842 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
3843 FreeRegionList* free_list) {
3844 assert(hr->is_humongous(), "this is only for humongous regions");
3845 assert(free_list != NULL, "pre-condition");
3846 hr->clear_humongous();
3847 free_region(hr, free_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
3848 }
3849
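// Adjust the old and humongous region set counts after regions have been
// freed elsewhere; OldSets_lock serializes concurrent updates from GC workers.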
3850 void G1CollectedHeap::remove_from_old_sets(const uint old_regions_removed,
3851 const uint humongous_regions_removed) {
3852 if (old_regions_removed > 0 || humongous_regions_removed > 0) {
3853 MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
3854 _old_set.bulk_remove(old_regions_removed);
3855 _humongous_set.bulk_remove(humongous_regions_removed);
3856 }
3857
3858 }
3859
3860 void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
3861 assert(list != NULL, "list can't be null");
3862 if (!list->is_empty()) {
3863 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
3864 _hrm->insert_list_into_free_list(list);
3865 }
3866 }
3867
3868 void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {
3869 decrease_used(bytes);
3870 }
3871
3872 class G1FreeCollectionSetTask : public AbstractGangTask {
3873 private:
3874
3875 // Closure applied to all regions in the collection set to do work that needs to
3876 // be done serially in a single thread.
3877 class G1SerialFreeCollectionSetClosure : public HeapRegionClosure {
3878 private:
3879 EvacuationInfo* _evacuation_info;
3880 const size_t* _surviving_young_words;
3881
3882 // Bytes used in successfully evacuated regions before the evacuation.
3883 size_t _before_used_bytes;
3883 // Bytes used in unsuccessfully evacuated regions before the evacuation
3924 assert(r->not_empty(), "Region %u is an empty region in the collection set.", r->hrm_index());
3925 _before_used_bytes += r->used();
3926 g1h->free_region(r,
3927 &_local_free_list,
3928 true, /* skip_remset */
3929 true, /* skip_hot_card_cache */
3930 true /* locked */);
3931 } else {
3932 r->uninstall_surv_rate_group();
3933 r->set_young_index_in_cset(-1);
3934 r->set_evacuation_failed(false);
3935 // When moving a young gen region to old gen, we "allocate" that whole region
3936 // there. This is in addition to any already evacuated objects. Notify the
3937 // policy about that.
3938 // Old gen regions do not cause an additional allocation: both the objects
3939 // still in the region and the ones already moved are accounted for elsewhere.
3940 if (r->is_young()) {
3941 _bytes_allocated_in_old_since_last_gc += HeapRegion::GrainBytes;
3942 }
3943 // The region is now considered to be old.
3944 if (g1h->is_hetero_heap()) {
3945 if (!r->is_old()) {
3946 // The region was young before; mark it as premature old so that the next mixed GC can move
3947 // its contents to an old region which is on nv-dimm.
3948 r->set_premature_old();
3949 }
3950 } else {
3951 r->set_old();
3952 }
3953 // Do some allocation statistics accounting. Regions that failed evacuation
3954 // are always made old, so there is no need to update anything in the young
3955 // gen statistics, but we need to update old gen statistics.
3956 size_t used_words = r->marked_bytes() / HeapWordSize;
3957
3958 _failure_used_words += used_words;
3959 _failure_waste_words += HeapRegion::GrainWords - used_words;
3960
3961 g1h->old_set_add(r);
3962 _after_used_bytes += r->used();
3963 }
3964 return false;
3965 }
3966
3967 void complete_work() {
3968 G1CollectedHeap* g1h = G1CollectedHeap::heap();
3969
3970 _evacuation_info->set_regions_freed(_local_free_list.length());
3971 _evacuation_info->increment_collectionset_used_after(_after_used_bytes);
3972
4385 return false;
4386 }
4387
4388 ~TearDownRegionSetsClosure() {
4389 assert(_old_set->is_empty(), "post-condition");
4390 }
4391 };
4392
4393 void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
4394 assert_at_safepoint_on_vm_thread();
4395
4396 if (!free_list_only) {
4397 TearDownRegionSetsClosure cl(&_old_set);
4398 heap_region_iterate(&cl);
4399
4400 // Note that emptying the _young_list is postponed and instead done as
4401 // the first step when rebuilding the regions sets again. The reason for
4402 // this is that during a full GC string deduplication needs to know if
4403 // a collected region was young or old when the full GC was initiated.
4404 }
4405 _hrm->remove_all_free_regions();
4406 }
4407
4408 void G1CollectedHeap::increase_used(size_t bytes) {
4409 _summary_bytes_used += bytes;
4410 }
4411
4412 void G1CollectedHeap::decrease_used(size_t bytes) {
4413 assert(_summary_bytes_used >= bytes,
4414 "invariant: _summary_bytes_used: " SIZE_FORMAT " should be >= bytes: " SIZE_FORMAT,
4415 _summary_bytes_used, bytes);
4416 _summary_bytes_used -= bytes;
4417 }
4418
4419 void G1CollectedHeap::set_used(size_t bytes) {
4420 _summary_bytes_used = bytes;
4421 }
4422
4423 class RebuildRegionSetsClosure : public HeapRegionClosure {
4424 private:
4425 bool _free_list_only;
4460 }
4461 _total_used += r->used();
4462 }
4463
4464 return false;
4465 }
4466
4467 size_t total_used() {
4468 return _total_used;
4469 }
4470 };
4471
4472 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
4473 assert_at_safepoint_on_vm_thread();
4474
4475 if (!free_list_only) {
4476 _eden.clear();
4477 _survivor.clear();
4478 }
4479
4480 RebuildRegionSetsClosure cl(free_list_only, &_old_set, _hrm);
4481 heap_region_iterate(&cl);
4482
4483 if (!free_list_only) {
4484 set_used(cl.total_used());
4485 if (_archive_allocator != NULL) {
4486 _archive_allocator->clear_used();
4487 }
4488 }
4489 assert(used() == recalculate_used(),
4490 "inconsistent used(), value: " SIZE_FORMAT " recalculated: " SIZE_FORMAT,
4491 used(), recalculate_used());
4492 }
4493
4494 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
4495 HeapRegion* hr = heap_region_containing(p);
4496 return hr->is_in(p);
4497 }
4498
4499 // Methods for the mutator alloc region
4500
4568 new_alloc_region->note_start_of_copying(during_im);
4569 return new_alloc_region;
4570 }
4571 return NULL;
4572 }
4573
4574 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
4575 size_t allocated_bytes,
4576 InCSetState dest) {
4577 bool during_im = collector_state()->in_initial_mark_gc();
4578 alloc_region->note_end_of_copying(during_im);
4579 g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
4580 if (dest.is_old()) {
4581 old_set_add(alloc_region);
4582 }
4583 _hr_printer.retire(alloc_region);
4584 }
4585
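// Find the free region with the highest index, committing a new region at the
// top of the heap if that is the only way to obtain one, then allocate it from
// the free list. Returns NULL if no such region is available.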
4586 HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
4587 bool expanded = false;
4588 uint index = _hrm->find_highest_free(&expanded);
4589
4590 if (index != G1_NO_HRM_INDEX) {
4591 if (expanded) {
4592 log_debug(gc, ergo, heap)("Attempt heap expansion (requested address range outside heap bounds). region size: " SIZE_FORMAT "B",
4593 HeapRegion::GrainWords * HeapWordSize);
4594 }
4595 _hrm->allocate_free_regions_starting_at(index, 1);
4596 return region_at(index);
4597 }
4598 return NULL;
4599 }
4600
4601 // Optimized nmethod scanning
4602
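// Closure applied to the oops embedded in an nmethod when registering it with
// the heap, so that each referenced region can track the nmethod as a code
// root. Continuations of humongous regions are never expected here, as the
// assert below checks.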
4603 class RegisterNMethodOopClosure: public OopClosure {
4604 G1CollectedHeap* _g1h;
4605 nmethod* _nm;
4606
4607 template <class T> void do_oop_work(T* p) {
4608 T heap_oop = RawAccess<>::oop_load(p);
4609 if (!CompressedOops::is_null(heap_oop)) {
4610 oop obj = CompressedOops::decode_not_null(heap_oop);
4611 HeapRegion* hr = _g1h->heap_region_containing(obj);
4612 assert(!hr->is_continues_humongous(),
4613 "trying to add code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
4614 " starting at " HR_FORMAT,
4615 p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()));