61 #include "gc/g1/heapRegionSet.inline.hpp"
62 #include "gc/g1/vm_operations_g1.hpp"
63 #include "gc/shared/adaptiveSizePolicy.hpp"
64 #include "gc/shared/gcHeapSummary.hpp"
65 #include "gc/shared/gcId.hpp"
66 #include "gc/shared/gcLocker.hpp"
67 #include "gc/shared/gcTimer.hpp"
68 #include "gc/shared/gcTrace.hpp"
69 #include "gc/shared/gcTraceTime.inline.hpp"
70 #include "gc/shared/generationSpec.hpp"
71 #include "gc/shared/isGCActiveMark.hpp"
72 #include "gc/shared/oopStorageParState.hpp"
73 #include "gc/shared/preservedMarks.inline.hpp"
74 #include "gc/shared/suspendibleThreadSet.hpp"
75 #include "gc/shared/referenceProcessor.inline.hpp"
76 #include "gc/shared/taskqueue.inline.hpp"
77 #include "gc/shared/weakProcessor.hpp"
78 #include "logging/log.hpp"
79 #include "memory/allocation.hpp"
80 #include "memory/iterator.hpp"
81 #include "memory/resourceArea.hpp"
82 #include "oops/access.inline.hpp"
83 #include "oops/compressedOops.inline.hpp"
84 #include "oops/oop.inline.hpp"
85 #include "prims/resolvedMethodTable.hpp"
86 #include "runtime/atomic.hpp"
87 #include "runtime/flags/flagSetting.hpp"
88 #include "runtime/handles.inline.hpp"
89 #include "runtime/init.hpp"
90 #include "runtime/orderAccess.hpp"
91 #include "runtime/threadSMR.hpp"
92 #include "runtime/vmThread.hpp"
93 #include "utilities/align.hpp"
94 #include "utilities/globalDefinitions.hpp"
95 #include "utilities/stack.inline.hpp"
96
97 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
98
99 // INVARIANTS/NOTES
100 //
804 curr_region->set_free();
805 curr_region->set_top(curr_region->bottom());
806 if (curr_region != last_region) {
807 curr_region = _hrm.next_region_in_heap(curr_region);
808 } else {
809 curr_region = NULL;
810 }
811 _hrm.shrink_at(curr_index, 1);
812 uncommitted_regions++;
813 }
814
815 // Notify mark-sweep that this is no longer an archive range.
816 G1ArchiveAllocator::set_range_archive(ranges[i], false);
817 }
818
819 if (uncommitted_regions != 0) {
820 log_debug(gc, ergo, heap)("Attempt heap shrinking (uncommitted archive regions). Total size: " SIZE_FORMAT "B",
821 HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
822 }
823 decrease_used(size_used);
824 }
825
826 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
827 ResourceMark rm; // For retrieving the thread names in log messages.
828
829 // The structure of this method has a lot of similarities to
830 // attempt_allocation_slow(). The reason these two were not merged
831 // into a single one is that such a method would require several "if
832 // allocation is not humongous do this, otherwise do that"
833 // conditional paths which would obscure its flow. In fact, an early
834 // version of this code did use a unified method which was harder to
835 // follow and, as a result, it had subtle bugs that were hard to
836 // track down. So keeping these two methods separate allows each to
837 // be more readable. It will be good to keep these two in sync as
838 // much as possible.
839
840 assert_heap_not_locked_and_not_at_safepoint();
841 assert(is_humongous(word_size), "attempt_allocation_humongous() "
842 "should only be called for humongous allocations");
843
|
61 #include "gc/g1/heapRegionSet.inline.hpp"
62 #include "gc/g1/vm_operations_g1.hpp"
63 #include "gc/shared/adaptiveSizePolicy.hpp"
64 #include "gc/shared/gcHeapSummary.hpp"
65 #include "gc/shared/gcId.hpp"
66 #include "gc/shared/gcLocker.hpp"
67 #include "gc/shared/gcTimer.hpp"
68 #include "gc/shared/gcTrace.hpp"
69 #include "gc/shared/gcTraceTime.inline.hpp"
70 #include "gc/shared/generationSpec.hpp"
71 #include "gc/shared/isGCActiveMark.hpp"
72 #include "gc/shared/oopStorageParState.hpp"
73 #include "gc/shared/preservedMarks.inline.hpp"
74 #include "gc/shared/suspendibleThreadSet.hpp"
75 #include "gc/shared/referenceProcessor.inline.hpp"
76 #include "gc/shared/taskqueue.inline.hpp"
77 #include "gc/shared/weakProcessor.hpp"
78 #include "logging/log.hpp"
79 #include "memory/allocation.hpp"
80 #include "memory/iterator.hpp"
81 #include "memory/metaspaceShared.hpp"
82 #include "memory/resourceArea.hpp"
83 #include "oops/access.inline.hpp"
84 #include "oops/compressedOops.inline.hpp"
85 #include "oops/oop.inline.hpp"
86 #include "prims/resolvedMethodTable.hpp"
87 #include "runtime/atomic.hpp"
88 #include "runtime/flags/flagSetting.hpp"
89 #include "runtime/handles.inline.hpp"
90 #include "runtime/init.hpp"
91 #include "runtime/orderAccess.hpp"
92 #include "runtime/threadSMR.hpp"
93 #include "runtime/vmThread.hpp"
94 #include "utilities/align.hpp"
95 #include "utilities/globalDefinitions.hpp"
96 #include "utilities/stack.inline.hpp"
97
98 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
99
100 // INVARIANTS/NOTES
101 //
805 curr_region->set_free();
806 curr_region->set_top(curr_region->bottom());
807 if (curr_region != last_region) {
808 curr_region = _hrm.next_region_in_heap(curr_region);
809 } else {
810 curr_region = NULL;
811 }
812 _hrm.shrink_at(curr_index, 1);
813 uncommitted_regions++;
814 }
815
816 // Notify mark-sweep that this is no longer an archive range.
817 G1ArchiveAllocator::set_range_archive(ranges[i], false);
818 }
819
820 if (uncommitted_regions != 0) {
821 log_debug(gc, ergo, heap)("Attempt heap shrinking (uncommitted archive regions). Total size: " SIZE_FORMAT "B",
822 HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
823 }
824 decrease_used(size_used);
825 }
826
// Hands back an oop that lives in a CDS/archive heap region after telling
// the GC it is now strongly reachable. The object itself is not moved or
// copied -- the same oop is returned; the only side effect is the SATB
// enqueue below, which keeps concurrent marking correct.
// NOTE(review): presumably called on the archived-object load path when an
// archived object is first exposed to the application -- confirm at callers.
827 oop G1CollectedHeap::materialize_archived_object(oop obj) {
828   assert(obj != NULL, "archived obj is NULL");
829   assert(MetaspaceShared::is_archive_object(obj), "must be archived object");
830
831   // Loading an archived object makes it strongly reachable. If it is
832   // loaded during concurrent marking, it must be enqueued to the SATB
833   // queue, shading the previously white object gray.
834   G1BarrierSet::enqueue(obj);
835
836   return obj;
837 }
838
839 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
840 ResourceMark rm; // For retrieving the thread names in log messages.
841
842 // The structure of this method has a lot of similarities to
843 // attempt_allocation_slow(). The reason these two were not merged
844 // into a single one is that such a method would require several "if
845 // allocation is not humongous do this, otherwise do that"
846 // conditional paths which would obscure its flow. In fact, an early
847 // version of this code did use a unified method which was harder to
848 // follow and, as a result, it had subtle bugs that were hard to
849 // track down. So keeping these two methods separate allows each to
850 // be more readable. It will be good to keep these two in sync as
851 // much as possible.
852
853 assert_heap_not_locked_and_not_at_safepoint();
854 assert(is_humongous(word_size), "attempt_allocation_humongous() "
855 "should only be called for humongous allocations");
856
|