41 #include "gc/shared/space.hpp"
42 #include "gc/shared/strongRootsScope.hpp"
43 #include "gc/shared/vmGCOperations.hpp"
44 #include "gc/shared/workgroup.hpp"
45 #include "memory/filemap.hpp"
46 #include "memory/resourceArea.hpp"
47 #include "oops/oop.inline.hpp"
48 #include "runtime/biasedLocking.hpp"
49 #include "runtime/fprofiler.hpp"
50 #include "runtime/handles.hpp"
51 #include "runtime/handles.inline.hpp"
52 #include "runtime/java.hpp"
53 #include "runtime/vmThread.hpp"
54 #include "services/management.hpp"
55 #include "services/memoryService.hpp"
56 #include "utilities/debug.hpp"
57 #include "utilities/formatBuffer.hpp"
58 #include "utilities/macros.hpp"
59 #include "utilities/stack.inline.hpp"
60 #include "utilities/vmError.hpp"
61 #if INCLUDE_ALL_GCS
62 #include "gc/cms/concurrentMarkSweepThread.hpp"
63 #include "gc/cms/vmCMSOperations.hpp"
64 #endif // INCLUDE_ALL_GCS
65
// Debug-only: number of header HeapWords skipped during block-content
// verification; set from CMSCollector in create_cms_collector().
NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)

// The set of potentially parallel tasks in root scanning.
// Each enumerator is a claim slot in the SubTasksDone instance created in
// the constructor, so that parallel workers claim each root group once.
enum GCH_strong_roots_tasks {
  GCH_PS_Universe_oops_do,
  GCH_PS_JNIHandles_oops_do,
  GCH_PS_ObjectSynchronizer_oops_do,
  GCH_PS_FlatProfiler_oops_do,
  GCH_PS_Management_oops_do,
  GCH_PS_SystemDictionary_oops_do,
  GCH_PS_ClassLoaderDataGraph_oops_do,
  GCH_PS_jvmti_oops_do,
  GCH_PS_CodeCache_oops_do,
  GCH_PS_aot_oops_do,
  GCH_PS_younger_gens,
  // Leave this one last.
  GCH_PS_NumElements
};
84
// Construct a two-generation heap driven by the given collector policy.
// A parallel WorkGang is created only when CMS is selected; the serial
// collector leaves _workers NULL (callers must check for NULL).
GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
  CollectedHeap(),
  _rem_set(NULL),
  _gen_policy(policy),
  // One claim slot per GCH_strong_roots_tasks enumerator.
  _process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
  _full_collections_completed(0)
{
  assert(policy != NULL, "Sanity check");
  if (UseConcMarkSweepGC) {
    _workers = new WorkGang("GC Thread", ParallelGCThreads,
                            /* are_GC_task_threads */true,
                            /* are_ConcurrentGC_threads */false);
    _workers->initialize_workers();
  } else {
    // Serial GC does not use workers.
    _workers = NULL;
  }
}
103
// Reserve the heap, split it into young (low addresses) and old (high
// addresses) parts, create the remembered set / barrier set, and
// initialize both generations. Returns JNI_OK on success, JNI_ENOMEM if
// reservation or (for CMS) collector creation fails.
jint GenCollectedHeap::initialize() {
  CollectedHeap::pre_initialize();

  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");

  // Allocate space for the heap.

  char* heap_address;
  ReservedSpace heap_rs;

  size_t heap_alignment = collector_policy()->heap_alignment();

  heap_address = allocate(heap_alignment, &heap_rs);

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));

  // The remembered set must exist before generation init, which wires
  // each generation to the barrier set.
  _rem_set = collector_policy()->create_rem_set(reserved_region());
  set_barrier_set(rem_set()->bs());

  // Young generation takes the first part of the reservation...
  ReservedSpace young_rs = heap_rs.first_part(gen_policy()->young_gen_spec()->max_size(), false, false);
  _young_gen = gen_policy()->young_gen_spec()->init(young_rs, rem_set());
  heap_rs = heap_rs.last_part(gen_policy()->young_gen_spec()->max_size());

  // ...and the old generation the remainder.
  ReservedSpace old_rs = heap_rs.first_part(gen_policy()->old_gen_spec()->max_size(), false, false);
  _old_gen = gen_policy()->old_gen_spec()->init(old_rs, rem_set());
  clear_incremental_collection_failed();

#if INCLUDE_ALL_GCS
  // If we are running CMS, create the collector responsible
  // for collecting the CMS generations.
  if (collector_policy()->is_concurrent_mark_sweep_policy()) {
    bool success = create_cms_collector();
    if (!success) return JNI_ENOMEM;
  }
#endif // INCLUDE_ALL_GCS

  return JNI_OK;
}
153
// Compute the total reservation size (young max + old max), reserve it
// with the requested alignment via Universe::reserve_heap(), and return
// the base address. *heap_rs receives the reservation; the caller checks
// is_reserved() for failure.
char* GenCollectedHeap::allocate(size_t alignment,
                                 ReservedSpace* heap_rs){
  // Now figure out the total size.
  const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
  assert(alignment % pageSize == 0, "Must be");

  GenerationSpec* young_spec = gen_policy()->young_gen_spec();
  GenerationSpec* old_spec = gen_policy()->old_gen_spec();

  // Check for overflow.
  size_t total_reserved = young_spec->max_size() + old_spec->max_size();
  if (total_reserved < young_spec->max_size()) {
    vm_exit_during_initialization("The size of the object heap + VM data exceeds "
                                  "the maximum representable size");
  }
  assert(total_reserved % alignment == 0,
         "Gen size; total_reserved=" SIZE_FORMAT ", alignment="
         SIZE_FORMAT, total_reserved, alignment);

  *heap_rs = Universe::reserve_heap(total_reserved, alignment);

  os::trace_page_sizes("Heap",
                       collector_policy()->min_heap_byte_size(),
                       total_reserved,
                       alignment,
                       heap_rs->base(),
                       heap_rs->size());

  return heap_rs->base();
}
184
// Late initialization: set up reference processing and seed the adaptive
// size policy with the initial eden/old/survivor capacities.
void GenCollectedHeap::post_initialize() {
  ref_processing_init();
  assert((_young_gen->kind() == Generation::DefNew) ||
         (_young_gen->kind() == Generation::ParNew),
         "Wrong youngest generation type");
  // Safe after the assert above: ParNew is a DefNewGeneration subtype.
  DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;

  assert(_old_gen->kind() == Generation::ConcurrentMarkSweep ||
         _old_gen->kind() == Generation::MarkSweepCompact,
         "Wrong generation kind");

  _gen_policy->initialize_size_policy(def_new_gen->eden()->capacity(),
                                      _old_gen->capacity(),
                                      def_new_gen->from()->capacity());
  _gen_policy->initialize_gc_policy_counters();
}
201
// Initialize per-generation reference processors (used for discovering
// and processing soft/weak/final/phantom references).
void GenCollectedHeap::ref_processing_init() {
  _young_gen->ref_processor_init();
  _old_gen->ref_processor_init();
}
206
207 size_t GenCollectedHeap::capacity() const {
208 return _young_gen->capacity() + _old_gen->capacity();
209 }
210
211 size_t GenCollectedHeap::used() const {
212 return _young_gen->used() + _old_gen->used();
213 }
214
// Record the currently-used region of each generation.
// NOTE(review): old is saved before young here — presumably deliberate
// ordering; confirm before changing.
void GenCollectedHeap::save_used_regions() {
  _old_gen->save_used_region();
  _young_gen->save_used_region();
}
219
220 size_t GenCollectedHeap::max_capacity() const {
221 return _young_gen->max_capacity() + _old_gen->max_capacity();
293
294 if (_old_gen->should_allocate(size, is_tlab)) {
295 res = _old_gen->allocate(size, is_tlab);
296 }
297
298 return res;
299 }
300
// Public allocation entry point for ordinary (non-TLAB) object memory;
// delegates to the policy's allocation slow path, which may trigger GC.
HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool* gc_overhead_limit_was_exceeded) {
  return gen_policy()->mem_allocate_work(size,
                                         false /* is_tlab */,
                                         gc_overhead_limit_was_exceeded);
}
307
308 bool GenCollectedHeap::must_clear_all_soft_refs() {
309 return _gc_cause == GCCause::_metadata_GC_clear_soft_refs ||
310 _gc_cause == GCCause::_wb_full_gc;
311 }
312
313 bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
314 if (!UseConcMarkSweepGC) {
315 return false;
316 }
317
318 switch (cause) {
319 case GCCause::_gc_locker: return GCLockerInvokesConcurrent;
320 case GCCause::_java_lang_system_gc:
321 case GCCause::_dcmd_gc_run: return ExplicitGCInvokesConcurrent;
322 default: return false;
323 }
324 }
325
326 void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
327 bool is_tlab, bool run_verification, bool clear_soft_refs,
328 bool restore_marks_for_biased_locking) {
329 FormatBuffer<> title("Collect gen: %s", gen->short_name());
330 GCTraceTime(Trace, gc, phases) t1(title);
331 TraceCollectorStats tcs(gen->counters());
332 TraceMemoryManagerStats tmms(gen->kind(),gc_cause());
333
334 gen->stat_record()->invocations++;
335 gen->stat_record()->accumulated_time.start();
336
337 // Must be done anew before each collection because
338 // a previous collection will do mangling and will
339 // change top of some spaces.
340 record_gen_tops_before_GC();
341
342 log_trace(gc)("%s invoke=%d size=" SIZE_FORMAT, heap()->is_young_gen(gen) ? "Young" : "Old", gen->stat_record()->invocations, size * HeapWordSize);
343
344 if (run_verification && VerifyBeforeGC) {
345 HandleMark hm; // Discard invalid handles created during verification
659 CLDClosure* cld_closure) {
660 MarkingCodeBlobClosure mark_code_closure(root_closure, CodeBlobToOopClosure::FixRelocations);
661
662 process_roots(scope, SO_ScavengeCodeCache, root_closure, root_closure,
663 cld_closure, cld_closure, &mark_code_closure);
664 process_string_table_roots(scope, root_closure);
665
666 if (!_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
667 root_closure->reset_generation();
668 }
669
670 // When collection is parallel, all threads get to cooperate to do
671 // old generation scanning.
672 old_gen_closure->set_generation(_old_gen);
673 rem_set()->younger_refs_iterate(_old_gen, old_gen_closure, scope->n_threads());
674 old_gen_closure->reset_generation();
675
676 _process_strong_tasks->all_tasks_completed(scope->n_threads());
677 }
678
// Root processing for CMS collections. Strong roots are always scanned;
// weak roots / string table / young-gen-as-roots are included only when
// requested. Code blobs are scanned without relocation fixups.
void GenCollectedHeap::cms_process_roots(StrongRootsScope* scope,
                                         bool young_gen_as_roots,
                                         ScanningOption so,
                                         bool only_strong_roots,
                                         OopsInGenClosure* root_closure,
                                         CLDClosure* cld_closure) {
  MarkingCodeBlobClosure mark_code_closure(root_closure, !CodeBlobToOopClosure::FixRelocations);
  // When only strong roots are wanted, suppress the weak variants by
  // passing NULL closures.
  OopsInGenClosure* weak_roots = only_strong_roots ? NULL : root_closure;
  CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;

  process_roots(scope, so, root_closure, weak_roots, cld_closure, weak_cld_closure, &mark_code_closure);
  if (!only_strong_roots) {
    process_string_table_roots(scope, root_closure);
  }

  if (young_gen_as_roots &&
      !_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
    // Exactly one worker scans the young generation as a root set.
    root_closure->set_generation(_young_gen);
    _young_gen->oop_iterate(root_closure);
    root_closure->reset_generation();
  }

  _process_strong_tasks->all_tasks_completed(scope->n_threads());
}
703
// Root processing for full (stop-the-world) collections. The string
// table is scanned only in the adjust phase; in marking phases its
// entries are treated as weak.
void GenCollectedHeap::full_process_roots(StrongRootsScope* scope,
                                          bool is_adjust_phase,
                                          ScanningOption so,
                                          bool only_strong_roots,
                                          OopsInGenClosure* root_closure,
                                          CLDClosure* cld_closure) {
  MarkingCodeBlobClosure mark_code_closure(root_closure, is_adjust_phase);
  // Suppress weak variants when only strong roots are requested.
  OopsInGenClosure* weak_roots = only_strong_roots ? NULL : root_closure;
  CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;

  process_roots(scope, so, root_closure, weak_roots, cld_closure, weak_cld_closure, &mark_code_closure);
  if (is_adjust_phase) {
    // We never treat the string table as roots during marking
    // for the full gc, so we only need to process it during
    // the adjust phase.
    process_string_table_roots(scope, root_closure);
  }

  _process_strong_tasks->all_tasks_completed(scope->n_threads());
}
748 bool GenCollectedHeap::no_allocs_since_save_marks() {
749 return _young_gen->no_allocs_since_save_marks() &&
750 _old_gen->no_allocs_since_save_marks();
751 }
752
// Inline contiguous allocation is supported iff the young generation
// supports it (compiled code bump-pointer allocation).
bool GenCollectedHeap::supports_inline_contig_alloc() const {
  return _young_gen->supports_inline_contig_alloc();
}
756
// Address of the young generation's allocation top, for use by
// compiled-code inline allocation.
HeapWord* volatile* GenCollectedHeap::top_addr() const {
  return _young_gen->top_addr();
}
760
// Address of the young generation's allocation limit, for use by
// compiled-code inline allocation.
HeapWord** GenCollectedHeap::end_addr() const {
  return _young_gen->end_addr();
}
764
765 // public collection interfaces
766
// Public collection entry point: route the cause either to a mostly
// concurrent (CMS) collection, a young-only collection, or a
// stop-the-world full collection.
void GenCollectedHeap::collect(GCCause::Cause cause) {
  if (should_do_concurrent_full_gc(cause)) {
#if INCLUDE_ALL_GCS
    // Mostly concurrent full collection.
    collect_mostly_concurrent(cause);
#else  // INCLUDE_ALL_GCS
    // should_do_concurrent_full_gc() returns false without CMS compiled in.
    ShouldNotReachHere();
#endif // INCLUDE_ALL_GCS
  } else if (cause == GCCause::_wb_young_gc) {
    // Young collection for the WhiteBox API.
    collect(cause, YoungGen);
  } else {
#ifdef ASSERT
  if (cause == GCCause::_scavenge_alot) {
    // Young collection only.
    collect(cause, YoungGen);
  } else {
    // Stop-the-world full collection.
    collect(cause, OldGen);
  }
#else
    // Stop-the-world full collection.
    collect(cause, OldGen);
#endif
  }
}
793
794 void GenCollectedHeap::collect(GCCause::Cause cause, GenerationType max_generation) {
795 // The caller doesn't have the Heap_lock
802 // The caller has the Heap_lock
803 assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
804 collect_locked(cause, OldGen);
805 }
806
807 // this is the private collection interface
808 // The Heap_lock is expected to be held on entry.
809
// Schedule a full collection on the VM thread. Caller holds the
// Heap_lock; the GC counts are read under the lock so the VM operation
// can detect a collection that raced ahead of it.
void GenCollectedHeap::collect_locked(GCCause::Cause cause, GenerationType max_generation) {
  // Read the GC count while holding the Heap_lock
  unsigned int gc_count_before      = total_collections();
  unsigned int full_gc_count_before = total_full_collections();
  {
    MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_generation);
    VMThread::execute(&op);
  }
}
821
822 #if INCLUDE_ALL_GCS
// Create and initialize the CMS collector for the old generation.
// Returns false (after initiating VM shutdown) if the collector could
// not be created or failed to initialize.
bool GenCollectedHeap::create_cms_collector() {

  assert(_old_gen->kind() == Generation::ConcurrentMarkSweep,
         "Unexpected generation kinds");
  // Skip two header words in the block content verification
  NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
  assert(_gen_policy->is_concurrent_mark_sweep_policy(), "Unexpected policy type");
  CMSCollector* collector =
    new CMSCollector((ConcurrentMarkSweepGeneration*)_old_gen,
                     _rem_set,
                     _gen_policy->as_concurrent_mark_sweep_policy());

  if (collector == NULL || !collector->completed_initialization()) {
    if (collector) {
      delete collector;  // Be nice in embedded situation
    }
    vm_shutdown_during_initialization("Could not create CMS collector");
    return false;
  }
  return true;  // success
}
844
// Schedule a mostly concurrent (CMS) full collection on the VM thread.
// Must be called without the Heap_lock; it is taken briefly to read the
// GC counts consistently.
void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");

  MutexLocker ml(Heap_lock);
  // Read the GC counts while holding the Heap_lock
  unsigned int full_gc_count_before = total_full_collections();
  unsigned int gc_count_before      = total_collections();
  {
    MutexUnlocker mu(Heap_lock);
    VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
    VMThread::execute(&op);
  }
}
858 #endif // INCLUDE_ALL_GCS
859
// Full collection up to and including the old generation.
void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  do_full_collection(clear_all_soft_refs, OldGen);
}
863
864 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
865 GenerationType last_generation) {
866 GenerationType local_last_generation;
867 if (!incremental_collection_will_fail(false /* don't consult_young */) &&
868 gc_cause() == GCCause::_gc_locker) {
869 local_last_generation = YoungGen;
870 } else {
871 local_last_generation = last_generation;
872 }
873
874 do_collection(true, // full
875 clear_all_soft_refs, // clear_all_soft_refs
876 0, // size
877 false, // is_tlab
878 local_last_generation); // last_generation
879 // Hack XXX FIX ME !!!
1082 cl->do_generation(_old_gen);
1083 cl->do_generation(_young_gen);
1084 } else {
1085 cl->do_generation(_young_gen);
1086 cl->do_generation(_old_gen);
1087 }
1088 }
1089
1090 bool GenCollectedHeap::is_maximal_no_gc() const {
1091 return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc();
1092 }
1093
// Record the current allocation point ("saved mark") in each generation.
void GenCollectedHeap::save_marks() {
  _young_gen->save_marks();
  _old_gen->save_marks();
}
1098
// Checked accessor for the singleton heap; asserts that the universe
// heap has been created and is in fact a GenCollectedHeap.
GenCollectedHeap* GenCollectedHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to GenCollectedHeap::heap()");
  assert(heap->kind() == CollectedHeap::GenCollectedHeap, "Not a GenCollectedHeap");
  return (GenCollectedHeap*)heap;
}
1105
// Plan the sliding compaction: objects compact into the old generation
// first; the same CompactPoint then continues through the young gen.
void GenCollectedHeap::prepare_for_compaction() {
  // Start by compacting into same gen.
  CompactPoint cp(_old_gen);
  _old_gen->prepare_for_compaction(&cp);
  _young_gen->prepare_for_compaction(&cp);
}
1112
1113 void GenCollectedHeap::verify(VerifyOption option /* ignored */) {
1114 log_debug(gc, verify)("%s", _old_gen->name());
1115 _old_gen->verify();
1116
1117 log_debug(gc, verify)("%s", _old_gen->name());
1118 _young_gen->verify();
1119
1120 log_debug(gc, verify)("RemSet");
1121 rem_set()->verify();
1122 }
1123
// Print both generations and metaspace statistics.
void GenCollectedHeap::print_on(outputStream* st) const {
  _young_gen->print_on(st);
  _old_gen->print_on(st);
  MetaspaceAux::print_on(st);
}
1129
// Apply the closure to all GC worker threads (if any) and, for CMS,
// to the concurrent mark-sweep threads as well.
void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  if (workers() != NULL) {
    workers()->threads_do(tc);
  }
#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::threads_do(tc);
  }
#endif // INCLUDE_ALL_GCS
}
1140
// Print GC threads; only CMS has dedicated worker/concurrent threads,
// so serial GC prints nothing here.
void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC) {
    workers()->print_worker_threads_on(st);
    ConcurrentMarkSweepThread::print_all_on(st);
  }
#endif // INCLUDE_ALL_GCS
}
1149
// Crash-report output: base heap information plus CMS collector state
// when CMS is in use.
void GenCollectedHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC) {
    st->cr();
    CMSCollector::print_on_error(st);
  }
#endif // INCLUDE_ALL_GCS
}
1160
// Print accumulated per-generation GC timing summaries, gated by the
// TraceYoungGenTime / TraceOldGenTime flags.
void GenCollectedHeap::print_tracing_info() const {
  if (TraceYoungGenTime) {
    _young_gen->print_summary_info();
  }
  if (TraceOldGenTime) {
    _old_gen->print_summary_info();
  }
}
1169
1170 void GenCollectedHeap::print_heap_change(size_t young_prev_used, size_t old_prev_used) const {
1171 log_info(gc, heap)("%s: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
1172 _young_gen->short_name(), young_prev_used / K, _young_gen->used() /K, _young_gen->capacity() /K);
1173 log_info(gc, heap)("%s: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
1174 _old_gen->short_name(), old_prev_used / K, _old_gen->used() /K, _old_gen->capacity() /K);
1175 }
1176
// Closure that forwards gc_prologue(full) to each generation.
class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_prologue(_full);
  }
  GenGCPrologueClosure(bool full) : _full(full) {};
};
1186
1187 void GenCollectedHeap::gc_prologue(bool full) {
1188 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
1189
1190 always_do_update_barrier = false;
1191 // Fill TLAB's and such
1192 CollectedHeap::accumulate_statistics_all_tlabs();
1193 ensure_parsability(true); // retire TLABs
1194
1195 // Walk generations
1196 GenGCPrologueClosure blk(full);
1197 generation_iterate(&blk, false); // not old-to-young.
1198 };
1199
// Closure that forwards gc_epilogue(full) to each generation.
class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_epilogue(_full);
  }
  GenGCEpilogueClosure(bool full) : _full(full) {};
};
1209
1210 void GenCollectedHeap::gc_epilogue(bool full) {
1211 #if defined(COMPILER2) || INCLUDE_JVMCI
1212 assert(DerivedPointerTable::is_empty(), "derived pointer present");
1213 size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
1214 guarantee(is_client_compilation_mode_vm() || actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps");
1215 #endif /* COMPILER2 || INCLUDE_JVMCI */
1216
1217 resize_all_tlabs();
1218
1219 GenGCEpilogueClosure blk(full);
1220 generation_iterate(&blk, false); // not old-to-young.
1221
1222 if (!CleanChunkPoolAsync) {
1223 Chunk::clean_chunk_pool();
1224 }
1225
1226 MetaspaceCounters::update_performance_counters();
1227 CompressedClassSpaceCounters::update_performance_counters();
1228
1229 always_do_update_barrier = UseConcMarkSweepGC;
1230 };
1231
1232 #ifndef PRODUCT
// Debug-only closure: records each generation's space tops before a GC
// (used with ZapUnusedHeapArea to detect writes past top).
class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
 private:
 public:
  void do_generation(Generation* gen) {
    gen->record_spaces_top();
  }
};
1240
// Debug-only: snapshot space tops in every generation when heap zapping
// is enabled; a no-op otherwise.
void GenCollectedHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    GenGCSaveTopsBeforeGCClosure blk;
    generation_iterate(&blk, false);  // not old-to-young.
  }
}
1247 #endif // not PRODUCT
1248
1249 class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
1289 };
1290
// Milliseconds since the most recent collection of any generation.
// Clamped to zero (with a warning) if the clock appears to run backwards.
jlong GenCollectedHeap::millis_since_last_gc() {
  // javaTimeNanos() is guaranteed to be monotonically non-decreasing
  // provided the underlying platform provides such a time source
  // (and it is bug free). So we still have to guard against getting
  // back a time later than 'now'.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  GenTimeOfLastGCClosure tolgc_cl(now);
  // iterate over generations getting the oldest
  // time that a generation was collected
  generation_iterate(&tolgc_cl, false);

  jlong retVal = now - tolgc_cl.time();
  if (retVal < 0) {
    log_warning(gc)("millis_since_last_gc() would return : " JLONG_FORMAT
       ". returning zero instead.", retVal);
    return 0;
  }
  return retVal;
}
1310
// Stop concurrent GC threads at VM shutdown; only CMS has any.
void GenCollectedHeap::stop() {
#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::cmst()->stop();
  }
#endif
}
|
41 #include "gc/shared/space.hpp"
42 #include "gc/shared/strongRootsScope.hpp"
43 #include "gc/shared/vmGCOperations.hpp"
44 #include "gc/shared/workgroup.hpp"
45 #include "memory/filemap.hpp"
46 #include "memory/resourceArea.hpp"
47 #include "oops/oop.inline.hpp"
48 #include "runtime/biasedLocking.hpp"
49 #include "runtime/fprofiler.hpp"
50 #include "runtime/handles.hpp"
51 #include "runtime/handles.inline.hpp"
52 #include "runtime/java.hpp"
53 #include "runtime/vmThread.hpp"
54 #include "services/management.hpp"
55 #include "services/memoryService.hpp"
56 #include "utilities/debug.hpp"
57 #include "utilities/formatBuffer.hpp"
58 #include "utilities/macros.hpp"
59 #include "utilities/stack.inline.hpp"
60 #include "utilities/vmError.hpp"
61
// Construct a two-generation heap driven by the given collector policy.
// (Serial-only variant: no worker gang is created here.)
GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
  CollectedHeap(),
  _rem_set(NULL),
  _gen_policy(policy),
  // One claim slot per GCH_strong_roots_tasks enumerator.
  _process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
  _full_collections_completed(0)
{
  assert(policy != NULL, "Sanity check");
}
71
// Reserve the heap, split it into young (low addresses) and old (high
// addresses) parts, create the remembered set / barrier set, and
// initialize both generations. Returns JNI_OK or JNI_ENOMEM.
jint GenCollectedHeap::initialize() {
  CollectedHeap::pre_initialize();

  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");

  // Allocate space for the heap.

  char* heap_address;
  ReservedSpace heap_rs;

  size_t heap_alignment = collector_policy()->heap_alignment();

  heap_address = allocate(heap_alignment, &heap_rs);

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));

  // The remembered set must exist before generation init, which wires
  // each generation to the barrier set.
  _rem_set = collector_policy()->create_rem_set(reserved_region());
  set_barrier_set(rem_set()->bs());

  // Young generation takes the first part of the reservation...
  ReservedSpace young_rs = heap_rs.first_part(gen_policy()->young_gen_spec()->max_size(), false, false);
  _young_gen = gen_policy()->young_gen_spec()->init(young_rs, rem_set());
  heap_rs = heap_rs.last_part(gen_policy()->young_gen_spec()->max_size());

  // ...and the old generation the remainder.
  ReservedSpace old_rs = heap_rs.first_part(gen_policy()->old_gen_spec()->max_size(), false, false);
  _old_gen = gen_policy()->old_gen_spec()->init(old_rs, rem_set());
  clear_incremental_collection_failed();

  return JNI_OK;
}
112
// Compute the total reservation size (young max + old max), reserve it
// with the requested alignment via Universe::reserve_heap(), and return
// the base address. *heap_rs receives the reservation; the caller checks
// is_reserved() for failure.
char* GenCollectedHeap::allocate(size_t alignment,
                                 ReservedSpace* heap_rs){
  // Now figure out the total size.
  const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
  assert(alignment % pageSize == 0, "Must be");

  GenerationSpec* young_spec = gen_policy()->young_gen_spec();
  GenerationSpec* old_spec = gen_policy()->old_gen_spec();

  // Check for overflow.
  size_t total_reserved = young_spec->max_size() + old_spec->max_size();
  if (total_reserved < young_spec->max_size()) {
    vm_exit_during_initialization("The size of the object heap + VM data exceeds "
                                  "the maximum representable size");
  }
  assert(total_reserved % alignment == 0,
         "Gen size; total_reserved=" SIZE_FORMAT ", alignment="
         SIZE_FORMAT, total_reserved, alignment);

  *heap_rs = Universe::reserve_heap(total_reserved, alignment);

  os::trace_page_sizes("Heap",
                       collector_policy()->min_heap_byte_size(),
                       total_reserved,
                       alignment,
                       heap_rs->base(),
                       heap_rs->size());

  return heap_rs->base();
}
143
// Late initialization: set up reference processing, verify generation
// kinds, and seed the size policy with initial capacities.
void GenCollectedHeap::post_initialize() {
  ref_processing_init();
  check_gen_kinds();
  // Safe: check_gen_kinds() asserted the young gen is a DefNew.
  DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;

  _gen_policy->initialize_size_policy(def_new_gen->eden()->capacity(),
                                      _old_gen->capacity(),
                                      def_new_gen->from()->capacity());
  _gen_policy->initialize_gc_policy_counters();
}
154
// Debug check: the serial heap expects exactly DefNew + MarkSweepCompact.
void GenCollectedHeap::check_gen_kinds() {
  assert(young_gen()->kind() == Generation::DefNew,
         "Wrong youngest generation type");
  assert(old_gen()->kind() == Generation::MarkSweepCompact,
         "Wrong generation kind");
}
161
// Initialize per-generation reference processors.
void GenCollectedHeap::ref_processing_init() {
  _young_gen->ref_processor_init();
  _old_gen->ref_processor_init();
}
166
167 size_t GenCollectedHeap::capacity() const {
168 return _young_gen->capacity() + _old_gen->capacity();
169 }
170
171 size_t GenCollectedHeap::used() const {
172 return _young_gen->used() + _old_gen->used();
173 }
174
// Record the currently-used region of each generation.
// NOTE(review): old is saved before young here — presumably deliberate
// ordering; confirm before changing.
void GenCollectedHeap::save_used_regions() {
  _old_gen->save_used_region();
  _young_gen->save_used_region();
}
179
180 size_t GenCollectedHeap::max_capacity() const {
181 return _young_gen->max_capacity() + _old_gen->max_capacity();
253
254 if (_old_gen->should_allocate(size, is_tlab)) {
255 res = _old_gen->allocate(size, is_tlab);
256 }
257
258 return res;
259 }
260
// Public allocation entry point for ordinary (non-TLAB) object memory;
// delegates to the policy's allocation slow path, which may trigger GC.
HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool* gc_overhead_limit_was_exceeded) {
  return gen_policy()->mem_allocate_work(size,
                                         false /* is_tlab */,
                                         gc_overhead_limit_was_exceeded);
}
267
268 bool GenCollectedHeap::must_clear_all_soft_refs() {
269 return _gc_cause == GCCause::_metadata_GC_clear_soft_refs ||
270 _gc_cause == GCCause::_wb_full_gc;
271 }
272
273 void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
274 bool is_tlab, bool run_verification, bool clear_soft_refs,
275 bool restore_marks_for_biased_locking) {
276 FormatBuffer<> title("Collect gen: %s", gen->short_name());
277 GCTraceTime(Trace, gc, phases) t1(title);
278 TraceCollectorStats tcs(gen->counters());
279 TraceMemoryManagerStats tmms(gen->kind(),gc_cause());
280
281 gen->stat_record()->invocations++;
282 gen->stat_record()->accumulated_time.start();
283
284 // Must be done anew before each collection because
285 // a previous collection will do mangling and will
286 // change top of some spaces.
287 record_gen_tops_before_GC();
288
289 log_trace(gc)("%s invoke=%d size=" SIZE_FORMAT, heap()->is_young_gen(gen) ? "Young" : "Old", gen->stat_record()->invocations, size * HeapWordSize);
290
291 if (run_verification && VerifyBeforeGC) {
292 HandleMark hm; // Discard invalid handles created during verification
606 CLDClosure* cld_closure) {
607 MarkingCodeBlobClosure mark_code_closure(root_closure, CodeBlobToOopClosure::FixRelocations);
608
609 process_roots(scope, SO_ScavengeCodeCache, root_closure, root_closure,
610 cld_closure, cld_closure, &mark_code_closure);
611 process_string_table_roots(scope, root_closure);
612
613 if (!_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
614 root_closure->reset_generation();
615 }
616
617 // When collection is parallel, all threads get to cooperate to do
618 // old generation scanning.
619 old_gen_closure->set_generation(_old_gen);
620 rem_set()->younger_refs_iterate(_old_gen, old_gen_closure, scope->n_threads());
621 old_gen_closure->reset_generation();
622
623 _process_strong_tasks->all_tasks_completed(scope->n_threads());
624 }
625
// Root processing for full (stop-the-world) collections. The string
// table is scanned only in the adjust phase; in marking phases its
// entries are treated as weak.
void GenCollectedHeap::full_process_roots(StrongRootsScope* scope,
                                          bool is_adjust_phase,
                                          ScanningOption so,
                                          bool only_strong_roots,
                                          OopsInGenClosure* root_closure,
                                          CLDClosure* cld_closure) {
  MarkingCodeBlobClosure mark_code_closure(root_closure, is_adjust_phase);
  // Suppress weak variants when only strong roots are requested.
  OopsInGenClosure* weak_roots = only_strong_roots ? NULL : root_closure;
  CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;

  process_roots(scope, so, root_closure, weak_roots, cld_closure, weak_cld_closure, &mark_code_closure);
  if (is_adjust_phase) {
    // We never treat the string table as roots during marking
    // for the full gc, so we only need to process it during
    // the adjust phase.
    process_string_table_roots(scope, root_closure);
  }

  _process_strong_tasks->all_tasks_completed(scope->n_threads());
}
670 bool GenCollectedHeap::no_allocs_since_save_marks() {
671 return _young_gen->no_allocs_since_save_marks() &&
672 _old_gen->no_allocs_since_save_marks();
673 }
674
// Inline contiguous allocation is supported iff the young generation
// supports it (compiled code bump-pointer allocation).
bool GenCollectedHeap::supports_inline_contig_alloc() const {
  return _young_gen->supports_inline_contig_alloc();
}
678
// Address of the young generation's allocation top, for use by
// compiled-code inline allocation.
HeapWord* volatile* GenCollectedHeap::top_addr() const {
  return _young_gen->top_addr();
}
682
// Address of the young generation's allocation limit, for use by
// compiled-code inline allocation.
HeapWord** GenCollectedHeap::end_addr() const {
  return _young_gen->end_addr();
}
686
687 // public collection interfaces
688
// Public collection entry point: WhiteBox/scavenge-alot causes get a
// young-only collection; everything else is a stop-the-world full GC.
void GenCollectedHeap::collect(GCCause::Cause cause) {
  if (cause == GCCause::_wb_young_gc) {
    // Young collection for the WhiteBox API.
    collect(cause, YoungGen);
  } else {
#ifdef ASSERT
  if (cause == GCCause::_scavenge_alot) {
    // Young collection only.
    collect(cause, YoungGen);
  } else {
    // Stop-the-world full collection.
    collect(cause, OldGen);
  }
#else
    // Stop-the-world full collection.
    collect(cause, OldGen);
#endif
  }
}
708
709 void GenCollectedHeap::collect(GCCause::Cause cause, GenerationType max_generation) {
710 // The caller doesn't have the Heap_lock
717 // The caller has the Heap_lock
718 assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
719 collect_locked(cause, OldGen);
720 }
721
722 // this is the private collection interface
723 // The Heap_lock is expected to be held on entry.
724
// Schedule a full collection on the VM thread. Caller holds the
// Heap_lock; the GC counts are read under the lock so the VM operation
// can detect a collection that raced ahead of it.
void GenCollectedHeap::collect_locked(GCCause::Cause cause, GenerationType max_generation) {
  // Read the GC count while holding the Heap_lock
  unsigned int gc_count_before      = total_collections();
  unsigned int full_gc_count_before = total_full_collections();
  {
    MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_generation);
    VMThread::execute(&op);
  }
}
736
// Full collection up to and including the old generation.
void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  do_full_collection(clear_all_soft_refs, OldGen);
}
740
741 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
742 GenerationType last_generation) {
743 GenerationType local_last_generation;
744 if (!incremental_collection_will_fail(false /* don't consult_young */) &&
745 gc_cause() == GCCause::_gc_locker) {
746 local_last_generation = YoungGen;
747 } else {
748 local_last_generation = last_generation;
749 }
750
751 do_collection(true, // full
752 clear_all_soft_refs, // clear_all_soft_refs
753 0, // size
754 false, // is_tlab
755 local_last_generation); // last_generation
756 // Hack XXX FIX ME !!!
959 cl->do_generation(_old_gen);
960 cl->do_generation(_young_gen);
961 } else {
962 cl->do_generation(_young_gen);
963 cl->do_generation(_old_gen);
964 }
965 }
966
967 bool GenCollectedHeap::is_maximal_no_gc() const {
968 return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc();
969 }
970
// Have each generation record its current allocation point ("saved marks").
void GenCollectedHeap::save_marks() {
  _young_gen->save_marks();
  _old_gen->save_marks();
}
975
976 GenCollectedHeap* GenCollectedHeap::heap() {
977 CollectedHeap* heap = Universe::heap();
978 assert(heap != NULL, "Uninitialized access to GenCollectedHeap::heap()");
979 assert(heap->kind() == CollectedHeap::GenCollectedHeap ||
980 heap->kind() == CollectedHeap::CMSHeap, "Not a GenCollectedHeap");
981 return (GenCollectedHeap*) heap;
982 }
983
// Plan compaction for a full GC. A single CompactPoint, initialized to the
// old generation, is threaded through both generations; the old generation
// is processed first, then the young generation reuses the same point.
void GenCollectedHeap::prepare_for_compaction() {
  // Start by compacting into same gen.
  CompactPoint cp(_old_gen);
  _old_gen->prepare_for_compaction(&cp);
  _young_gen->prepare_for_compaction(&cp);
}
990
991 void GenCollectedHeap::verify(VerifyOption option /* ignored */) {
992 log_debug(gc, verify)("%s", _old_gen->name());
993 _old_gen->verify();
994
995 log_debug(gc, verify)("%s", _old_gen->name());
996 _young_gen->verify();
997
998 log_debug(gc, verify)("RemSet");
999 rem_set()->verify();
1000 }
1001
// Print heap layout: young generation first, then old generation, then
// metaspace statistics.
void GenCollectedHeap::print_on(outputStream* st) const {
  _young_gen->print_on(st);
  _old_gen->print_on(st);
  MetaspaceAux::print_on(st);
}
1007
// Apply 'tc' to each GC worker thread. Intentionally empty here.
// NOTE(review): the '_workers' gang created in the constructor for CMS is
// not iterated -- presumably a subclass overrides this; confirm.
void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
}
1010
// Print per-GC-thread information. Intentionally empty: nothing is printed
// for this heap (see the matching empty gc_threads_do()).
void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
}
1013
// Print accumulated GC timing summaries for each generation, gated by the
// TraceYoungGenTime / TraceOldGenTime flags.
void GenCollectedHeap::print_tracing_info() const {
  if (TraceYoungGenTime) {
    _young_gen->print_summary_info();
  }
  if (TraceOldGenTime) {
    _old_gen->print_summary_info();
  }
}
1022
1023 void GenCollectedHeap::print_heap_change(size_t young_prev_used, size_t old_prev_used) const {
1024 log_info(gc, heap)("%s: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
1025 _young_gen->short_name(), young_prev_used / K, _young_gen->used() /K, _young_gen->capacity() /K);
1026 log_info(gc, heap)("%s: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
1027 _old_gen->short_name(), old_prev_used / K, _old_gen->used() /K, _old_gen->capacity() /K);
1028 }
1029
1030 class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
1031 private:
1032 bool _full;
1033 public:
1034 void do_generation(Generation* gen) {
1035 gen->gc_prologue(_full);
1036 }
1037 GenGCPrologueClosure(bool full) : _full(full) {};
1038 };
1039
1040 void GenCollectedHeap::gc_prologue(bool full) {
1041 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
1042
1043 // Fill TLAB's and such
1044 CollectedHeap::accumulate_statistics_all_tlabs();
1045 ensure_parsability(true); // retire TLABs
1046
1047 // Walk generations
1048 GenGCPrologueClosure blk(full);
1049 generation_iterate(&blk, false); // not old-to-young.
1050 };
1051
1052 class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
1053 private:
1054 bool _full;
1055 public:
1056 void do_generation(Generation* gen) {
1057 gen->gc_epilogue(_full);
1058 }
1059 GenGCEpilogueClosure(bool full) : _full(full) {};
1060 };
1061
1062 void GenCollectedHeap::gc_epilogue(bool full) {
1063 #if defined(COMPILER2) || INCLUDE_JVMCI
1064 assert(DerivedPointerTable::is_empty(), "derived pointer present");
1065 size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
1066 guarantee(is_client_compilation_mode_vm() || actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps");
1067 #endif /* COMPILER2 || INCLUDE_JVMCI */
1068
1069 resize_all_tlabs();
1070
1071 GenGCEpilogueClosure blk(full);
1072 generation_iterate(&blk, false); // not old-to-young.
1073
1074 if (!CleanChunkPoolAsync) {
1075 Chunk::clean_chunk_pool();
1076 }
1077
1078 MetaspaceCounters::update_performance_counters();
1079 CompressedClassSpaceCounters::update_performance_counters();
1080 };
1081
1082 #ifndef PRODUCT
1083 class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
1084 private:
1085 public:
1086 void do_generation(Generation* gen) {
1087 gen->record_spaces_top();
1088 }
1089 };
1090
1091 void GenCollectedHeap::record_gen_tops_before_GC() {
1092 if (ZapUnusedHeapArea) {
1093 GenGCSaveTopsBeforeGCClosure blk;
1094 generation_iterate(&blk, false); // not old-to-young.
1095 }
1096 }
1097 #endif // not PRODUCT
1098
1099 class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
1139 };
1140
1141 jlong GenCollectedHeap::millis_since_last_gc() {
1142 // javaTimeNanos() is guaranteed to be monotonically non-decreasing
1143 // provided the underlying platform provides such a time source
1144 // (and it is bug free). So we still have to guard against getting
1145 // back a time later than 'now'.
1146 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
1147 GenTimeOfLastGCClosure tolgc_cl(now);
1148 // iterate over generations getting the oldest
1149 // time that a generation was collected
1150 generation_iterate(&tolgc_cl, false);
1151
1152 jlong retVal = now - tolgc_cl.time();
1153 if (retVal < 0) {
1154 log_warning(gc)("millis_since_last_gc() would return : " JLONG_FORMAT
1155 ". returning zero instead.", retVal);
1156 return 0;
1157 }
1158 return retVal;
1159 }
|