39 #include "gc/shared/genOopClosures.inline.hpp"
40 #include "gc/shared/generationSpec.hpp"
41 #include "gc/shared/space.hpp"
42 #include "gc/shared/strongRootsScope.hpp"
43 #include "gc/shared/vmGCOperations.hpp"
44 #include "gc/shared/workgroup.hpp"
45 #include "memory/filemap.hpp"
46 #include "memory/resourceArea.hpp"
47 #include "oops/oop.inline.hpp"
48 #include "runtime/biasedLocking.hpp"
49 #include "runtime/fprofiler.hpp"
50 #include "runtime/handles.hpp"
51 #include "runtime/handles.inline.hpp"
52 #include "runtime/java.hpp"
53 #include "runtime/vmThread.hpp"
54 #include "services/management.hpp"
55 #include "services/memoryService.hpp"
56 #include "utilities/macros.hpp"
57 #include "utilities/stack.inline.hpp"
58 #include "utilities/vmError.hpp"
59 #if INCLUDE_ALL_GCS
60 #include "gc/cms/concurrentMarkSweepThread.hpp"
61 #include "gc/cms/vmCMSOperations.hpp"
62 #endif // INCLUDE_ALL_GCS
63
// Debug-only static: header words skipped during block-content verification (set by CMS).
NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)
65
66 // The set of potentially parallel tasks in root scanning.
enum GCH_strong_roots_tasks {
  GCH_PS_Universe_oops_do,
  GCH_PS_JNIHandles_oops_do,
  GCH_PS_ObjectSynchronizer_oops_do,
  GCH_PS_FlatProfiler_oops_do,
  GCH_PS_Management_oops_do,
  GCH_PS_SystemDictionary_oops_do,
  GCH_PS_ClassLoaderDataGraph_oops_do,
  GCH_PS_jvmti_oops_do,
  GCH_PS_CodeCache_oops_do,
  GCH_PS_aot_oops_do,
  GCH_PS_younger_gens,
  // Leave this one last: it serves as the task count for SubTasksDone.
  GCH_PS_NumElements
};
82
// Construct a two-generation heap driven by the given collector policy.
// The work gang is created only for CMS; serial GC runs single-threaded.
GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
  CollectedHeap(),
  _rem_set(NULL),
  _gen_policy(policy),
  _process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
  _full_collections_completed(0)
{
  assert(policy != NULL, "Sanity check");
  if (UseConcMarkSweepGC) {
    // CMS uses a work gang for parallel phases of the collection.
    _workers = new WorkGang("GC Thread", ParallelGCThreads,
                            /* are_GC_task_threads */true,
                            /* are_ConcurrentGC_threads */false);
    _workers->initialize_workers();
  } else {
    // Serial GC does not use workers.
    _workers = NULL;
  }
}
101
// Reserve and set up the heap: reserve the address range, create the
// remembered set / barrier set, and initialize the young and old
// generations in that (address) order. Returns JNI_OK or JNI_ENOMEM.
jint GenCollectedHeap::initialize() {
  CollectedHeap::pre_initialize();

  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");

  // Allocate space for the heap.

  char* heap_address;
  ReservedSpace heap_rs;

  size_t heap_alignment = collector_policy()->heap_alignment();

  heap_address = allocate(heap_alignment, &heap_rs);

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));

  // The remembered set spans the entire reserved region.
  _rem_set = collector_policy()->create_rem_set(reserved_region());
  set_barrier_set(rem_set()->bs());

  // Carve the reservation into the young part followed by the old part.
  ReservedSpace young_rs = heap_rs.first_part(gen_policy()->young_gen_spec()->max_size(), false, false);
  _young_gen = gen_policy()->young_gen_spec()->init(young_rs, rem_set());
  heap_rs = heap_rs.last_part(gen_policy()->young_gen_spec()->max_size());

  ReservedSpace old_rs = heap_rs.first_part(gen_policy()->old_gen_spec()->max_size(), false, false);
  _old_gen = gen_policy()->old_gen_spec()->init(old_rs, rem_set());
  clear_incremental_collection_failed();

#if INCLUDE_ALL_GCS
  // If we are running CMS, create the collector responsible
  // for collecting the CMS generations.
  if (collector_policy()->is_concurrent_mark_sweep_policy()) {
    bool success = create_cms_collector();
    if (!success) return JNI_ENOMEM;
  }
#endif // INCLUDE_ALL_GCS

  return JNI_OK;
}
151
152 char* GenCollectedHeap::allocate(size_t alignment,
153 ReservedSpace* heap_rs){
154 // Now figure out the total size.
155 const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
156 assert(alignment % pageSize == 0, "Must be");
157
158 GenerationSpec* young_spec = gen_policy()->young_gen_spec();
159 GenerationSpec* old_spec = gen_policy()->old_gen_spec();
160
161 // Check for overflow.
162 size_t total_reserved = young_spec->max_size() + old_spec->max_size();
163 if (total_reserved < young_spec->max_size()) {
164 vm_exit_during_initialization("The size of the object heap + VM data exceeds "
165 "the maximum representable size");
166 }
167 assert(total_reserved % alignment == 0,
168 "Gen size; total_reserved=" SIZE_FORMAT ", alignment="
169 SIZE_FORMAT, total_reserved, alignment);
170
171 *heap_rs = Universe::reserve_heap(total_reserved, alignment);
291
292 if (_old_gen->should_allocate(size, is_tlab)) {
293 res = _old_gen->allocate(size, is_tlab);
294 }
295
296 return res;
297 }
298
// Allocate 'size' HeapWords for an ordinary object (not a TLAB).
// Delegates to the policy, which may trigger collections on failure.
HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool* gc_overhead_limit_was_exceeded) {
  return gen_policy()->mem_allocate_work(size,
                                         false /* is_tlab */,
                                         gc_overhead_limit_was_exceeded);
}
305
306 bool GenCollectedHeap::must_clear_all_soft_refs() {
307 return _gc_cause == GCCause::_metadata_GC_clear_soft_refs ||
308 _gc_cause == GCCause::_wb_full_gc;
309 }
310
311 bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
312 if (!UseConcMarkSweepGC) {
313 return false;
314 }
315
316 switch (cause) {
317 case GCCause::_gc_locker: return GCLockerInvokesConcurrent;
318 case GCCause::_java_lang_system_gc:
319 case GCCause::_dcmd_gc_run: return ExplicitGCInvokesConcurrent;
320 default: return false;
321 }
322 }
323
324 void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
325 bool is_tlab, bool run_verification, bool clear_soft_refs,
326 bool restore_marks_for_biased_locking) {
327 FormatBuffer<> title("Collect gen: %s", gen->short_name());
328 GCTraceTime(Trace, gc, phases) t1(title);
329 TraceCollectorStats tcs(gen->counters());
330 TraceMemoryManagerStats tmms(gen->kind(),gc_cause());
331
332 gen->stat_record()->invocations++;
333 gen->stat_record()->accumulated_time.start();
334
335 // Must be done anew before each collection because
336 // a previous collection will do mangling and will
337 // change top of some spaces.
338 record_gen_tops_before_GC();
339
340 log_trace(gc)("%s invoke=%d size=" SIZE_FORMAT, heap()->is_young_gen(gen) ? "Young" : "Old", gen->stat_record()->invocations, size * HeapWordSize);
341
342 if (run_verification && VerifyBeforeGC) {
343 HandleMark hm; // Discard invalid handles created during verification
746 bool GenCollectedHeap::no_allocs_since_save_marks() {
747 return _young_gen->no_allocs_since_save_marks() &&
748 _old_gen->no_allocs_since_save_marks();
749 }
750
// Inline (fast-path) contiguous allocation is a young-generation property.
bool GenCollectedHeap::supports_inline_contig_alloc() const {
  return _young_gen->supports_inline_contig_alloc();
}
754
// Address of the young gen's allocation top, for compiled inline allocation.
HeapWord* volatile* GenCollectedHeap::top_addr() const {
  return _young_gen->top_addr();
}
758
// Address of the young gen's allocation limit, for compiled inline allocation.
HeapWord** GenCollectedHeap::end_addr() const {
  return _young_gen->end_addr();
}
762
763 // public collection interfaces
764
// External entry point for a heap collection; routes the request to a
// mostly-concurrent, young-only, or stop-the-world full collection.
void GenCollectedHeap::collect(GCCause::Cause cause) {
  if (should_do_concurrent_full_gc(cause)) {
#if INCLUDE_ALL_GCS
    // Mostly concurrent full collection.
    collect_mostly_concurrent(cause);
#else  // INCLUDE_ALL_GCS
    ShouldNotReachHere();
#endif // INCLUDE_ALL_GCS
  } else if (cause == GCCause::_wb_young_gc) {
    // Young collection for the WhiteBox API.
    collect(cause, YoungGen);
  } else {
#ifdef ASSERT
  if (cause == GCCause::_scavenge_alot) {
    // Young collection only.
    collect(cause, YoungGen);
  } else {
    // Stop-the-world full collection.
    collect(cause, OldGen);
  }
#else
    // Stop-the-world full collection.
    collect(cause, OldGen);
#endif
  }
}
791
792 void GenCollectedHeap::collect(GCCause::Cause cause, GenerationType max_generation) {
793 // The caller doesn't have the Heap_lock
800 // The caller has the Heap_lock
801 assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
802 collect_locked(cause, OldGen);
803 }
804
805 // this is the private collection interface
806 // The Heap_lock is expected to be held on entry.
807
// Schedule a stop-the-world collection up to max_generation via the VM thread.
// Caller holds Heap_lock; it is released while the VM operation executes.
void GenCollectedHeap::collect_locked(GCCause::Cause cause, GenerationType max_generation) {
  // Read the GC count while holding the Heap_lock
  unsigned int gc_count_before = total_collections();
  unsigned int full_gc_count_before = total_full_collections();
  {
    MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_generation);
    VMThread::execute(&op);
  }
}
819
820 #if INCLUDE_ALL_GCS
// Create the CMS collector for the old generation. Returns false (after
// initiating VM shutdown) if the collector cannot be fully initialized.
bool GenCollectedHeap::create_cms_collector() {

  assert(_old_gen->kind() == Generation::ConcurrentMarkSweep,
         "Unexpected generation kinds");
  // Skip two header words in the block content verification
  NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
  assert(_gen_policy->is_concurrent_mark_sweep_policy(), "Unexpected policy type");
  CMSCollector* collector =
    new CMSCollector((ConcurrentMarkSweepGeneration*)_old_gen,
                     _rem_set,
                     _gen_policy->as_concurrent_mark_sweep_policy());

  if (collector == NULL || !collector->completed_initialization()) {
    if (collector) {
      delete collector; // Be nice in embedded situation
    }
    vm_shutdown_during_initialization("Could not create CMS collector");
    return false;
  }
  return true; // success
}
842
// Start a mostly-concurrent (CMS) full collection via the VM thread.
// Must NOT be called while holding the Heap_lock.
void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");

  MutexLocker ml(Heap_lock);
  // Read the GC counts while holding the Heap_lock
  unsigned int full_gc_count_before = total_full_collections();
  unsigned int gc_count_before = total_collections();
  {
    MutexUnlocker mu(Heap_lock);
    VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
    VMThread::execute(&op);
  }
}
856 #endif // INCLUDE_ALL_GCS
857
// Full collection of both generations (old gen is the last collected).
void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  do_full_collection(clear_all_soft_refs, OldGen);
}
861
862 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
863 GenerationType last_generation) {
864 GenerationType local_last_generation;
865 if (!incremental_collection_will_fail(false /* don't consult_young */) &&
866 gc_cause() == GCCause::_gc_locker) {
867 local_last_generation = YoungGen;
868 } else {
869 local_last_generation = last_generation;
870 }
871
872 do_collection(true, // full
873 clear_all_soft_refs, // clear_all_soft_refs
874 0, // size
875 false, // is_tlab
876 local_last_generation); // last_generation
877 // Hack XXX FIX ME !!!
1078 bool old_to_young) {
1079 if (old_to_young) {
1080 cl->do_generation(_old_gen);
1081 cl->do_generation(_young_gen);
1082 } else {
1083 cl->do_generation(_young_gen);
1084 cl->do_generation(_old_gen);
1085 }
1086 }
1087
1088 bool GenCollectedHeap::is_maximal_no_gc() const {
1089 return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc();
1090 }
1091
// Record the current allocation points ("marks") in both generations.
void GenCollectedHeap::save_marks() {
  _young_gen->save_marks();
  _old_gen->save_marks();
}
1096
// Checked accessor for the singleton heap as a GenCollectedHeap.
GenCollectedHeap* GenCollectedHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to GenCollectedHeap::heap()");
  assert(heap->kind() == CollectedHeap::GenCollectedHeap, "Not a GenCollectedHeap");
  return (GenCollectedHeap*)heap;
}
1103
// Compute forwarding addresses for a full compaction: old gen first so
// young-gen survivors can slide down into the old generation.
void GenCollectedHeap::prepare_for_compaction() {
  // Start by compacting into same gen.
  CompactPoint cp(_old_gen);
  _old_gen->prepare_for_compaction(&cp);
  _young_gen->prepare_for_compaction(&cp);
}
1110
1111 void GenCollectedHeap::verify(VerifyOption option /* ignored */) {
1112 log_debug(gc, verify)("%s", _old_gen->name());
1113 _old_gen->verify();
1114
1115 log_debug(gc, verify)("%s", _old_gen->name());
1116 _young_gen->verify();
1117
1118 log_debug(gc, verify)("RemSet");
1119 rem_set()->verify();
1120 }
1121
// Print heap layout: both generations followed by metaspace usage.
void GenCollectedHeap::print_on(outputStream* st) const {
  _young_gen->print_on(st);
  _old_gen->print_on(st);
  MetaspaceAux::print_on(st);
}
1127
// Apply the closure to all GC worker threads (and the CMS thread if active).
void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  if (workers() != NULL) {
    workers()->threads_do(tc);
  }
#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::threads_do(tc);
  }
#endif // INCLUDE_ALL_GCS
}
1138
// Print GC threads; only CMS has dedicated GC threads in this heap.
void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC) {
    workers()->print_worker_threads_on(st);
    ConcurrentMarkSweepThread::print_all_on(st);
  }
#endif // INCLUDE_ALL_GCS
}
1147
// Error-reporting hook: base heap state plus CMS collector state if active.
void GenCollectedHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC) {
    st->cr();
    CMSCollector::print_on_error(st);
  }
#endif // INCLUDE_ALL_GCS
}
1158
// Print accumulated GC time summaries, gated by the tracing flags.
void GenCollectedHeap::print_tracing_info() const {
  if (TraceYoungGenTime) {
    _young_gen->print_summary_info();
  }
  if (TraceOldGenTime) {
    _old_gen->print_summary_info();
  }
}
1167
// Log per-generation before->after(capacity) usage in KB after a collection.
void GenCollectedHeap::print_heap_change(size_t young_prev_used, size_t old_prev_used) const {
  log_info(gc, heap)("%s: " SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
                     _young_gen->short_name(), young_prev_used / K, _young_gen->used() /K, _young_gen->capacity() /K);
  log_info(gc, heap)("%s: " SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
                     _old_gen->short_name(), old_prev_used / K, _old_gen->used() /K, _old_gen->capacity() /K);
}
1174
// Per-generation closure invoking gc_prologue with the 'full' flag.
class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_prologue(_full);
  }
  GenGCPrologueClosure(bool full) : _full(full) {};
};
1184
// Work done before every collection: retire TLABs and run each
// generation's gc_prologue, youngest first.
void GenCollectedHeap::gc_prologue(bool full) {
  assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");

  // Barrier updates are suppressed during the pause.
  always_do_update_barrier = false;
  // Fill TLAB's and such
  CollectedHeap::accumulate_statistics_all_tlabs();
  ensure_parsability(true);   // retire TLABs

  // Walk generations
  GenGCPrologueClosure blk(full);
  generation_iterate(&blk, false); // not old-to-young.
};
1197
// Per-generation closure invoking gc_epilogue with the 'full' flag.
class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_epilogue(_full);
  }
  GenGCEpilogueClosure(bool full) : _full(full) {};
};
1207
// Work done after every collection: resize TLABs, run per-generation
// epilogues, clean chunk pools, and update metaspace counters.
void GenCollectedHeap::gc_epilogue(bool full) {
#if defined(COMPILER2) || INCLUDE_JVMCI
  assert(DerivedPointerTable::is_empty(), "derived pointer present");
  // Check the gap to the top of the address space is still large enough
  // that compiled inline allocation cannot wrap (see guarantee message).
  size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
  guarantee(is_client_compilation_mode_vm() || actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps");
#endif /* COMPILER2 || INCLUDE_JVMCI */

  resize_all_tlabs();

  GenGCEpilogueClosure blk(full);
  generation_iterate(&blk, false); // not old-to-young.

  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }

  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();

  // Re-enable barrier updates (needed while CMS runs concurrently).
  always_do_update_barrier = UseConcMarkSweepGC;
};
1229
1230 #ifndef PRODUCT
// Debug-only closure: record each generation's space tops before a GC.
class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
 private:
 public:
  void do_generation(Generation* gen) {
    gen->record_spaces_top();
  }
};
1238
// Debug-only: snapshot space tops so unused-area zapping can be checked.
void GenCollectedHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    GenGCSaveTopsBeforeGCClosure blk;
    generation_iterate(&blk, false);  // not old-to-young.
  }
}
1245 #endif // not PRODUCT
1246
1247 class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
1287 };
1288
// Milliseconds since the most recent collection of any generation;
// clamped to zero if clock skew would make it negative.
jlong GenCollectedHeap::millis_since_last_gc() {
  // javaTimeNanos() is guaranteed to be monotonically non-decreasing
  // provided the underlying platform provides such a time source
  // (and it is bug free). So we still have to guard against getting
  // back a time later than 'now'.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  GenTimeOfLastGCClosure tolgc_cl(now);
  // iterate over generations getting the oldest
  // time that a generation was collected
  generation_iterate(&tolgc_cl, false);

  jlong retVal = now - tolgc_cl.time();
  if (retVal < 0) {
    log_warning(gc)("millis_since_last_gc() would return : " JLONG_FORMAT
       ". returning zero instead.", retVal);
    return 0;
  }
  return retVal;
}
1308
// Stop GC threads at VM shutdown; only the CMS thread needs stopping.
void GenCollectedHeap::stop() {
#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::cmst()->stop();
  }
#endif
}
|
39 #include "gc/shared/genOopClosures.inline.hpp"
40 #include "gc/shared/generationSpec.hpp"
41 #include "gc/shared/space.hpp"
42 #include "gc/shared/strongRootsScope.hpp"
43 #include "gc/shared/vmGCOperations.hpp"
44 #include "gc/shared/workgroup.hpp"
45 #include "memory/filemap.hpp"
46 #include "memory/resourceArea.hpp"
47 #include "oops/oop.inline.hpp"
48 #include "runtime/biasedLocking.hpp"
49 #include "runtime/fprofiler.hpp"
50 #include "runtime/handles.hpp"
51 #include "runtime/handles.inline.hpp"
52 #include "runtime/java.hpp"
53 #include "runtime/vmThread.hpp"
54 #include "services/management.hpp"
55 #include "services/memoryService.hpp"
56 #include "utilities/macros.hpp"
57 #include "utilities/stack.inline.hpp"
58 #include "utilities/vmError.hpp"
59
// Debug-only static: header words skipped during block-content verification.
NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)
61
62 // The set of potentially parallel tasks in root scanning.
enum GCH_strong_roots_tasks {
  GCH_PS_Universe_oops_do,
  GCH_PS_JNIHandles_oops_do,
  GCH_PS_ObjectSynchronizer_oops_do,
  GCH_PS_FlatProfiler_oops_do,
  GCH_PS_Management_oops_do,
  GCH_PS_SystemDictionary_oops_do,
  GCH_PS_ClassLoaderDataGraph_oops_do,
  GCH_PS_jvmti_oops_do,
  GCH_PS_CodeCache_oops_do,
  GCH_PS_aot_oops_do,
  GCH_PS_younger_gens,
  // Leave this one last: it serves as the task count for SubTasksDone.
  GCH_PS_NumElements
};
78
// Construct a two-generation heap driven by the given collector policy.
GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
  CollectedHeap(),
  _rem_set(NULL),
  _gen_policy(policy),
  _process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
  _full_collections_completed(0)
{
  assert(policy != NULL, "Sanity check");

  // Serial GC does not use workers.
  _workers = NULL;
}
91
// Reserve and set up the heap: reserve the address range, create the
// barrier set and remembered set, and initialize the young and old
// generations in that (address) order. Returns JNI_OK or JNI_ENOMEM.
jint GenCollectedHeap::initialize() {
  CollectedHeap::pre_initialize();

  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");

  // Allocate space for the heap.

  char* heap_address;
  ReservedSpace heap_rs;

  size_t heap_alignment = collector_policy()->heap_alignment();

  heap_address = allocate(heap_alignment, &heap_rs);

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));

  // Barrier set and remembered set cover the entire reserved region.
  CardTableModRefBSForCTRS* barrier_set = create_barrier_set(reserved_region());
  _rem_set = collector_policy()->create_rem_set(reserved_region(), barrier_set);
  set_barrier_set(barrier_set);

  // Carve the reservation into the young part followed by the old part.
  ReservedSpace young_rs = heap_rs.first_part(gen_policy()->young_gen_spec()->max_size(), false, false);
  _young_gen = gen_policy()->young_gen_spec()->init(young_rs, rem_set());
  heap_rs = heap_rs.last_part(gen_policy()->young_gen_spec()->max_size());

  ReservedSpace old_rs = heap_rs.first_part(gen_policy()->old_gen_spec()->max_size(), false, false);
  _old_gen = gen_policy()->old_gen_spec()->init(old_rs, rem_set());
  clear_incremental_collection_failed();

  return JNI_OK;
}
133
// Create the card-table barrier set covering the whole reserved heap.
CardTableModRefBSForCTRS* GenCollectedHeap::create_barrier_set(MemRegion whole_heap) {
  return new CardTableModRefBSForCTRS(whole_heap);
}
137
138 char* GenCollectedHeap::allocate(size_t alignment,
139 ReservedSpace* heap_rs){
140 // Now figure out the total size.
141 const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
142 assert(alignment % pageSize == 0, "Must be");
143
144 GenerationSpec* young_spec = gen_policy()->young_gen_spec();
145 GenerationSpec* old_spec = gen_policy()->old_gen_spec();
146
147 // Check for overflow.
148 size_t total_reserved = young_spec->max_size() + old_spec->max_size();
149 if (total_reserved < young_spec->max_size()) {
150 vm_exit_during_initialization("The size of the object heap + VM data exceeds "
151 "the maximum representable size");
152 }
153 assert(total_reserved % alignment == 0,
154 "Gen size; total_reserved=" SIZE_FORMAT ", alignment="
155 SIZE_FORMAT, total_reserved, alignment);
156
157 *heap_rs = Universe::reserve_heap(total_reserved, alignment);
277
278 if (_old_gen->should_allocate(size, is_tlab)) {
279 res = _old_gen->allocate(size, is_tlab);
280 }
281
282 return res;
283 }
284
// Allocate 'size' HeapWords for an ordinary object (not a TLAB).
// Delegates to the policy, which may trigger collections on failure.
HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool* gc_overhead_limit_was_exceeded) {
  return gen_policy()->mem_allocate_work(size,
                                         false /* is_tlab */,
                                         gc_overhead_limit_was_exceeded);
}
291
292 bool GenCollectedHeap::must_clear_all_soft_refs() {
293 return _gc_cause == GCCause::_metadata_GC_clear_soft_refs ||
294 _gc_cause == GCCause::_wb_full_gc;
295 }
296
297 void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
298 bool is_tlab, bool run_verification, bool clear_soft_refs,
299 bool restore_marks_for_biased_locking) {
300 FormatBuffer<> title("Collect gen: %s", gen->short_name());
301 GCTraceTime(Trace, gc, phases) t1(title);
302 TraceCollectorStats tcs(gen->counters());
303 TraceMemoryManagerStats tmms(gen->kind(),gc_cause());
304
305 gen->stat_record()->invocations++;
306 gen->stat_record()->accumulated_time.start();
307
308 // Must be done anew before each collection because
309 // a previous collection will do mangling and will
310 // change top of some spaces.
311 record_gen_tops_before_GC();
312
313 log_trace(gc)("%s invoke=%d size=" SIZE_FORMAT, heap()->is_young_gen(gen) ? "Young" : "Old", gen->stat_record()->invocations, size * HeapWordSize);
314
315 if (run_verification && VerifyBeforeGC) {
316 HandleMark hm; // Discard invalid handles created during verification
719 bool GenCollectedHeap::no_allocs_since_save_marks() {
720 return _young_gen->no_allocs_since_save_marks() &&
721 _old_gen->no_allocs_since_save_marks();
722 }
723
// Inline (fast-path) contiguous allocation is a young-generation property.
bool GenCollectedHeap::supports_inline_contig_alloc() const {
  return _young_gen->supports_inline_contig_alloc();
}
727
// Address of the young gen's allocation top, for compiled inline allocation.
HeapWord* volatile* GenCollectedHeap::top_addr() const {
  return _young_gen->top_addr();
}
731
// Address of the young gen's allocation limit, for compiled inline allocation.
HeapWord** GenCollectedHeap::end_addr() const {
  return _young_gen->end_addr();
}
735
736 // public collection interfaces
737
// External entry point for a heap collection; routes the request to a
// young-only or stop-the-world full collection.
void GenCollectedHeap::collect(GCCause::Cause cause) {
  if (cause == GCCause::_wb_young_gc) {
    // Young collection for the WhiteBox API.
    collect(cause, YoungGen);
  } else {
#ifdef ASSERT
  if (cause == GCCause::_scavenge_alot) {
    // Young collection only.
    collect(cause, YoungGen);
  } else {
    // Stop-the-world full collection.
    collect(cause, OldGen);
  }
#else
    // Stop-the-world full collection.
    collect(cause, OldGen);
#endif
  }
}
757
758 void GenCollectedHeap::collect(GCCause::Cause cause, GenerationType max_generation) {
759 // The caller doesn't have the Heap_lock
766 // The caller has the Heap_lock
767 assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
768 collect_locked(cause, OldGen);
769 }
770
771 // this is the private collection interface
772 // The Heap_lock is expected to be held on entry.
773
// Schedule a stop-the-world collection up to max_generation via the VM thread.
// Caller holds Heap_lock; it is released while the VM operation executes.
void GenCollectedHeap::collect_locked(GCCause::Cause cause, GenerationType max_generation) {
  // Read the GC count while holding the Heap_lock
  unsigned int gc_count_before = total_collections();
  unsigned int full_gc_count_before = total_full_collections();
  {
    MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_generation);
    VMThread::execute(&op);
  }
}
785
// Full collection of both generations (old gen is the last collected).
void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  do_full_collection(clear_all_soft_refs, OldGen);
}
789
790 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
791 GenerationType last_generation) {
792 GenerationType local_last_generation;
793 if (!incremental_collection_will_fail(false /* don't consult_young */) &&
794 gc_cause() == GCCause::_gc_locker) {
795 local_last_generation = YoungGen;
796 } else {
797 local_last_generation = last_generation;
798 }
799
800 do_collection(true, // full
801 clear_all_soft_refs, // clear_all_soft_refs
802 0, // size
803 false, // is_tlab
804 local_last_generation); // last_generation
805 // Hack XXX FIX ME !!!
1006 bool old_to_young) {
1007 if (old_to_young) {
1008 cl->do_generation(_old_gen);
1009 cl->do_generation(_young_gen);
1010 } else {
1011 cl->do_generation(_young_gen);
1012 cl->do_generation(_old_gen);
1013 }
1014 }
1015
1016 bool GenCollectedHeap::is_maximal_no_gc() const {
1017 return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc();
1018 }
1019
// Record the current allocation points ("marks") in both generations.
void GenCollectedHeap::save_marks() {
  _young_gen->save_marks();
  _old_gen->save_marks();
}
1024
// Checked accessor for the singleton heap as a GenCollectedHeap.
GenCollectedHeap* GenCollectedHeap::heap() {
  CollectedHeap* heap = GC::gc()->heap();
  assert(heap != NULL, "Uninitialized access to GenCollectedHeap::heap()");
  assert(heap->kind() == CollectedHeap::GenCollectedHeap, "Not a GenCollectedHeap");
  return (GenCollectedHeap*)heap;
}
1031
// Compute forwarding addresses for a full compaction: old gen first so
// young-gen survivors can slide down into the old generation.
void GenCollectedHeap::prepare_for_compaction() {
  // Start by compacting into same gen.
  CompactPoint cp(_old_gen);
  _old_gen->prepare_for_compaction(&cp);
  _young_gen->prepare_for_compaction(&cp);
}
1038
1039 void GenCollectedHeap::verify(VerifyOption option /* ignored */) {
1040 log_debug(gc, verify)("%s", _old_gen->name());
1041 _old_gen->verify();
1042
1043 log_debug(gc, verify)("%s", _old_gen->name());
1044 _young_gen->verify();
1045
1046 log_debug(gc, verify)("RemSet");
1047 rem_set()->verify();
1048 }
1049
// Print heap layout: both generations followed by metaspace usage.
void GenCollectedHeap::print_on(outputStream* st) const {
  _young_gen->print_on(st);
  _old_gen->print_on(st);
  MetaspaceAux::print_on(st);
}
1055
// Apply the closure to all GC worker threads, if a work gang exists.
void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  if (workers() != NULL) {
    workers()->threads_do(tc);
  }
}
1061
void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
  // Intentionally empty: serial GC has no dedicated GC threads to print.
}
1064
// Error-reporting hook: just the base heap state.
void GenCollectedHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);
}
1068
// Print accumulated GC time summaries, gated by the tracing flags.
void GenCollectedHeap::print_tracing_info() const {
  if (TraceYoungGenTime) {
    _young_gen->print_summary_info();
  }
  if (TraceOldGenTime) {
    _old_gen->print_summary_info();
  }
}
1077
// Log per-generation before->after(capacity) usage in KB after a collection.
void GenCollectedHeap::print_heap_change(size_t young_prev_used, size_t old_prev_used) const {
  log_info(gc, heap)("%s: " SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
                     _young_gen->short_name(), young_prev_used / K, _young_gen->used() /K, _young_gen->capacity() /K);
  log_info(gc, heap)("%s: " SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
                     _old_gen->short_name(), old_prev_used / K, _old_gen->used() /K, _old_gen->capacity() /K);
}
1084
// Per-generation closure invoking gc_prologue with the 'full' flag.
class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_prologue(_full);
  }
  GenGCPrologueClosure(bool full) : _full(full) {};
};
1094
// Work done before every collection: retire TLABs and run each
// generation's gc_prologue, youngest first.
void GenCollectedHeap::gc_prologue(bool full) {
  assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");

  // Fill TLAB's and such
  CollectedHeap::accumulate_statistics_all_tlabs();
  ensure_parsability(true);   // retire TLABs

  // Walk generations
  GenGCPrologueClosure blk(full);
  generation_iterate(&blk, false); // not old-to-young.
};
1106
// Per-generation closure invoking gc_epilogue with the 'full' flag.
class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_epilogue(_full);
  }
  GenGCEpilogueClosure(bool full) : _full(full) {};
};
1116
// Work done after every collection: resize TLABs, run per-generation
// epilogues, clean chunk pools, and update metaspace counters.
void GenCollectedHeap::gc_epilogue(bool full) {
#if defined(COMPILER2) || INCLUDE_JVMCI
  assert(DerivedPointerTable::is_empty(), "derived pointer present");
  // Check the gap to the top of the address space is still large enough
  // that compiled inline allocation cannot wrap (see guarantee message).
  size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
  guarantee(is_client_compilation_mode_vm() || actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps");
#endif /* COMPILER2 || INCLUDE_JVMCI */

  resize_all_tlabs();

  GenGCEpilogueClosure blk(full);
  generation_iterate(&blk, false); // not old-to-young.

  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }

  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();

};
1137
1138 #ifndef PRODUCT
// Debug-only closure: record each generation's space tops before a GC.
class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
 private:
 public:
  void do_generation(Generation* gen) {
    gen->record_spaces_top();
  }
};
1146
// Debug-only: snapshot space tops so unused-area zapping can be checked.
void GenCollectedHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    GenGCSaveTopsBeforeGCClosure blk;
    generation_iterate(&blk, false);  // not old-to-young.
  }
}
1153 #endif // not PRODUCT
1154
1155 class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
1195 };
1196
// Milliseconds since the most recent collection of any generation;
// clamped to zero if clock skew would make it negative.
jlong GenCollectedHeap::millis_since_last_gc() {
  // javaTimeNanos() is guaranteed to be monotonically non-decreasing
  // provided the underlying platform provides such a time source
  // (and it is bug free). So we still have to guard against getting
  // back a time later than 'now'.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  GenTimeOfLastGCClosure tolgc_cl(now);
  // iterate over generations getting the oldest
  // time that a generation was collected
  generation_iterate(&tolgc_cl, false);

  jlong retVal = now - tolgc_cl.time();
  if (retVal < 0) {
    log_warning(gc)("millis_since_last_gc() would return : " JLONG_FORMAT
       ". returning zero instead.", retVal);
    return 0;
  }
  return retVal;
}
|