60 #include "runtime/atomic.hpp"
61 #include "runtime/commandLineFlagConstraintList.hpp"
62 #include "runtime/deoptimization.hpp"
63 #include "runtime/fprofiler.hpp"
64 #include "runtime/handles.inline.hpp"
65 #include "runtime/init.hpp"
66 #include "runtime/java.hpp"
67 #include "runtime/javaCalls.hpp"
68 #include "runtime/sharedRuntime.hpp"
69 #include "runtime/synchronizer.hpp"
70 #include "runtime/thread.inline.hpp"
71 #include "runtime/timerTrace.hpp"
72 #include "runtime/vm_operations.hpp"
73 #include "services/memoryService.hpp"
74 #include "utilities/copy.hpp"
75 #include "utilities/events.hpp"
76 #include "utilities/hashtable.inline.hpp"
77 #include "utilities/macros.hpp"
78 #include "utilities/ostream.hpp"
79 #include "utilities/preserveException.hpp"
80 #if INCLUDE_ALL_GCS
81 #include "gc/cms/cmsCollectorPolicy.hpp"
82 #include "gc/g1/g1CollectedHeap.inline.hpp"
83 #include "gc/g1/g1CollectorPolicy.hpp"
84 #include "gc/parallel/parallelScavengeHeap.hpp"
85 #include "gc/shared/adaptiveSizePolicy.hpp"
86 #endif // INCLUDE_ALL_GCS
87 #if INCLUDE_CDS
88 #include "classfile/sharedClassUtil.hpp"
89 #endif
90
91 // Known objects
// One array klass per primitive element type, plus _typeArrayKlassObjs as a
// BasicType-indexed table over the same klasses (bound T_VOID+1), and the
// klass for Object[]. The oop fields appear to be the primitive-type
// java.lang.Class mirrors (per HotSpot naming) — they are scanned as roots
// in oops_do() below. All stay NULL until filled in during bootstrap.
92 Klass* Universe::_boolArrayKlassObj = NULL;
93 Klass* Universe::_byteArrayKlassObj = NULL;
94 Klass* Universe::_charArrayKlassObj = NULL;
95 Klass* Universe::_intArrayKlassObj = NULL;
96 Klass* Universe::_shortArrayKlassObj = NULL;
97 Klass* Universe::_longArrayKlassObj = NULL;
98 Klass* Universe::_singleArrayKlassObj = NULL;
99 Klass* Universe::_doubleArrayKlassObj = NULL;
100 Klass* Universe::_typeArrayKlassObjs[T_VOID+1] = { NULL /*, NULL...*/ };
101 Klass* Universe::_objectArrayKlassObj = NULL;
102 oop Universe::_int_mirror = NULL;
103 oop Universe::_float_mirror = NULL;
104 oop Universe::_double_mirror = NULL;
105 oop Universe::_byte_mirror = NULL;
106 oop Universe::_bool_mirror = NULL;
145
146 // These variables are guarded by FullGCALot_lock.
147 debug_only(objArrayOop Universe::_fullgc_alot_dummy_array = NULL;)
148 debug_only(int Universe::_fullgc_alot_dummy_next = 0;)
149
150 // Heap
151 int Universe::_verify_count = 0;
152
153 // Oop verification (see MacroAssembler::verify_oop)
// The (mask, bits) pair computed by calculate_verify_data(); the initial
// values (mask 0, bits all-ones) are the "not yet computed" sentinel that
// calculate_verify_data() tests for before asserting mask stability.
154 uintptr_t Universe::_verify_oop_mask = 0;
155 uintptr_t Universe::_verify_oop_bits = (uintptr_t) -1;
156
157 int Universe::_base_vtable_size = 0;
158 bool Universe::_bootstrapping = false;
159 bool Universe::_module_initialized = false;
160 bool Universe::_fully_initialized = false;
161
// No initializer: zero-initialized anyway by static storage duration.
162 size_t Universe::_heap_capacity_at_last_gc;
163 size_t Universe::_heap_used_at_last_gc = 0;
164
165 CollectedHeap* Universe::_collectedHeap = NULL;
166
// Compressed oop/klass encoding state; fields are presumably
// { base, shift, use_implicit_null_checks } — matches the set_narrow_oop_*
// accessors used in initialize_heap(). TODO confirm against NarrowPtrStruct.
167 NarrowPtrStruct Universe::_narrow_oop = { NULL, 0, true };
168 NarrowPtrStruct Universe::_narrow_klass = { NULL, 0, true };
169 address Universe::_narrow_ptrs_base;
170
171 void Universe::basic_type_classes_do(void f(Klass*)) {
172 f(boolArrayKlassObj());
173 f(byteArrayKlassObj());
174 f(charArrayKlassObj());
175 f(intArrayKlassObj());
176 f(shortArrayKlassObj());
177 f(longArrayKlassObj());
178 f(singleArrayKlassObj());
179 f(doubleArrayKlassObj());
180 }
181
182 void Universe::oops_do(OopClosure* f, bool do_all) {
183
184 f->do_oop((oop*) &_int_mirror);
185 f->do_oop((oop*) &_float_mirror);
186 f->do_oop((oop*) &_double_mirror);
624
625 if (_non_oop_bits == 0) {
626 _non_oop_bits = (intptr_t)os::non_memory_address_word() | 1;
627 }
628
629 return (void*)_non_oop_bits;
630 }
631
632 jint universe_init() {
633 assert(!Universe::_fully_initialized, "called after initialize_vtables");
634 guarantee(1 << LogHeapWordSize == sizeof(HeapWord),
635 "LogHeapWordSize is incorrect.");
636 guarantee(sizeof(oop) >= sizeof(HeapWord), "HeapWord larger than oop?");
637 guarantee(sizeof(oop) % sizeof(HeapWord) == 0,
638 "oop size is not not a multiple of HeapWord size");
639
640 TraceTime timer("Genesis", TRACETIME_LOG(Info, startuptime));
641
642 JavaClasses::compute_hard_coded_offsets();
643
644 jint status = Universe::initialize_heap();
645 if (status != JNI_OK) {
646 return status;
647 }
648
649 Metaspace::global_initialize();
650
651 AOTLoader::universe_init();
652
653 // Checks 'AfterMemoryInit' constraints.
654 if (!CommandLineFlagConstraintList::check_constraints(CommandLineFlagConstraint::AfterMemoryInit)) {
655 return JNI_EINVAL;
656 }
657
658 // Create memory for metadata. Must be after initializing heap for
659 // DumpSharedSpaces.
660 ClassLoaderData::init_null_class_loader_data();
661
662 // We have a heap so create the Method* caches before
663 // Metaspace::initialize_shared_spaces() tries to populate them.
664 Universe::_finalizer_register_cache = new LatestMethodCache();
673 // the file (other than the mapped regions) is no longer needed, and
674 // the file is closed. Closing the file does not affect the
675 // currently mapped regions.
676 MetaspaceShared::initialize_shared_spaces();
677 StringTable::create_table();
678 } else {
679 SymbolTable::create_table();
680 StringTable::create_table();
681
682 if (DumpSharedSpaces) {
683 MetaspaceShared::prepare_for_dumping();
684 }
685 }
686 if (strlen(VerifySubSet) > 0) {
687 Universe::initialize_verify_flags();
688 }
689
690 return JNI_OK;
691 }
692
// Instantiate (but do not initialize) the CollectedHeap selected by the GC
// command-line flags; ShouldNotReachHere() if no Use*GC flag is set —
// presumably argument processing guarantees exactly one is. TODO confirm.
693 CollectedHeap* Universe::create_heap() {
694   assert(_collectedHeap == NULL, "Heap already created");
// In builds without INCLUDE_ALL_GCS only the serial collector exists, so the
// other collectors are fatal errors. Note the #if/#else arms both leave the
// if/else-if chain syntactically open; the "} else if (UseSerialGC)" arm
// after the #endif closes it in either configuration.
695 #if !INCLUDE_ALL_GCS
696   if (UseParallelGC) {
697     fatal("UseParallelGC not supported in this VM.");
698   } else if (UseG1GC) {
699     fatal("UseG1GC not supported in this VM.");
700   } else if (UseConcMarkSweepGC) {
701     fatal("UseConcMarkSweepGC not supported in this VM.");
702 #else
703   if (UseParallelGC) {
704     return Universe::create_heap_with_policy<ParallelScavengeHeap, GenerationSizer>();
705   } else if (UseG1GC) {
706     return Universe::create_heap_with_policy<G1CollectedHeap, G1CollectorPolicy>();
707   } else if (UseConcMarkSweepGC) {
708     return Universe::create_heap_with_policy<GenCollectedHeap, ConcurrentMarkSweepPolicy>();
709 #endif
710   } else if (UseSerialGC) {
711     return Universe::create_heap_with_policy<GenCollectedHeap, MarkSweepPolicy>();
712   }
713
714   ShouldNotReachHere();
715   return NULL;
716 }
717
718 // Choose the heap base address and oop encoding mode
719 // when compressed oops are used:
720 // Unscaled - Use 32-bits oops without encoding when
721 // NarrowOopHeapBaseMin + heap_size < 4Gb
722 // ZeroBased - Use zero based compressed oops with encoding when
723 // NarrowOopHeapBaseMin + heap_size < 32Gb
724 // HeapBased - Use compressed oops with heap base + encoding.
725
// Create and initialize the CollectedHeap, configure TLAB sizing, and on
// 64-bit choose/validate the compressed-oops encoding (shift and base) to
// match where the heap was actually reserved.
// Returns JNI_OK on success, or the heap's initialization failure status.
726 jint Universe::initialize_heap() {
727   jint status = JNI_ERR;
728
// create_heap_ext() looks like an extension hook that may supply an
// alternate heap; NULL means "use the flag-selected one". TODO confirm.
729   _collectedHeap = create_heap_ext();
730   if (_collectedHeap == NULL) {
731     _collectedHeap = create_heap();
732   }
733
734   status = _collectedHeap->initialize();
735   if (status != JNI_OK) {
736     return status;
737   }
738   log_info(gc)("Using %s", _collectedHeap->name());
739
740   ThreadLocalAllocBuffer::set_max_size(Universe::heap()->max_tlab_size());
741
742 #ifdef _LP64
743   if (UseCompressedOops) {
744     // Subtract a page because something can get allocated at heap base.
745     // This also makes implicit null checking work, because the
746     // memory+1 page below heap_base needs to cause a signal.
747     // See needs_explicit_null_check.
748     // Only set the heap base for compressed oops because it indicates
749     // compressed oops for pstack code.
750     if ((uint64_t)Universe::heap()->reserved_region().end() > UnscaledOopHeapMax) {
751       // Didn't reserve heap below 4Gb. Must shift.
752       Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
753     }
754     if ((uint64_t)Universe::heap()->reserved_region().end() <= OopEncodingHeapMax) {
755       // Did reserve heap below 32Gb. Can use base == 0;
756       Universe::set_narrow_oop_base(0);
757     }
758
759     Universe::set_narrow_ptrs_base(Universe::narrow_oop_base());
760
761     if (log_is_enabled(Info, gc, heap, coops)) {
762       ResourceMark rm;
763       outputStream* logst = Log(gc, heap, coops)::info_stream();
764       Universe::print_compressed_oops_mode(logst);
765     }
766
767     // Tell tests in which mode we run.
768     Arguments::PropertyList_add(new SystemProperty("java.vm.compressedOopsMode",
769                                                    narrow_oop_mode_to_string(narrow_oop_mode()),
770                                                    false));
771   }
772   // Universe::narrow_oop_base() is one page below the heap.
773   assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() -
774          os::vm_page_size()) ||
775          Universe::narrow_oop_base() == NULL, "invalid value");
776   assert(Universe::narrow_oop_shift() == LogMinObjAlignmentInBytes ||
777          Universe::narrow_oop_shift() == 0, "invalid value");
778 #endif
779
780   // We will never reach the CATCH below since Exceptions::_throw will cause
781   // the VM to exit if an exception is thrown during initialization
782
783   if (UseTLAB) {
784     assert(Universe::heap()->supports_tlab_allocation(),
785            "Should support thread-local allocation buffers");
786     ThreadLocalAllocBuffer::startup_initialization();
787   }
788   return JNI_OK;
789 }
790
791 void Universe::print_compressed_oops_mode(outputStream* st) {
792 st->print("Heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
793 p2i(Universe::heap()->base()), Universe::heap()->reserved_region().byte_size()/M);
794
795 st->print(", Compressed Oops mode: %s", narrow_oop_mode_to_string(narrow_oop_mode()));
796
797 if (Universe::narrow_oop_base() != 0) {
798 st->print(": " PTR_FORMAT, p2i(Universe::narrow_oop_base()));
799 }
800
801 if (Universe::narrow_oop_shift() != 0) {
802 st->print(", Oop shift amount: %d", Universe::narrow_oop_shift());
803 }
804
805 if (!Universe::narrow_oop_use_implicit_null_checks()) {
806 st->print(", no protected page in front of the heap");
807 }
808 st->cr();
809 }
810
811 ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
812
813 assert(alignment <= Arguments::conservative_max_heap_alignment(),
837 // Else heap start and base MUST differ, so that NULL can be encoded nonambigous.
838 Universe::set_narrow_oop_base((address)total_rs.compressed_oop_base());
839 }
840
841 return total_rs;
842 }
843
844 vm_exit_during_initialization(
845 err_msg("Could not reserve enough space for " SIZE_FORMAT "KB object heap",
846 total_reserved/K));
847
848 // satisfy compiler
849 ShouldNotReachHere();
850 return ReservedHeapSpace(0, 0, false);
851 }
852
853
854 // It's the caller's responsibility to ensure glitch-freedom
855 // (if required).
856 void Universe::update_heap_info_at_gc() {
857 _heap_capacity_at_last_gc = heap()->capacity();
858 _heap_used_at_last_gc = heap()->used();
859 }
860
861
862 const char* Universe::narrow_oop_mode_to_string(Universe::NARROW_OOP_MODE mode) {
863 switch (mode) {
864 case UnscaledNarrowOop:
865 return "32-bit";
866 case ZeroBasedNarrowOop:
867 return "Zero based";
868 case DisjointBaseNarrowOop:
869 return "Non-zero disjoint base";
870 case HeapBasedNarrowOop:
871 return "Non-zero based";
872 }
873
874 ShouldNotReachHere();
875 return "";
876 }
877
878
1042 Universe::_preallocated_out_of_memory_error_array = oopFactory::new_objArray(ik, len, CHECK_false);
1043 for (int i=0; i<len; i++) {
1044 oop err = ik->allocate_instance(CHECK_false);
1045 Handle err_h = Handle(THREAD, err);
1046 java_lang_Throwable::allocate_backtrace(err_h, CHECK_false);
1047 Universe::preallocated_out_of_memory_errors()->obj_at_put(i, err_h());
1048 }
1049 Universe::_preallocated_out_of_memory_error_avail_count = (jint)len;
1050 }
1051
1052 Universe::initialize_known_methods(CHECK_false);
1053
1054 // This needs to be done before the first scavenge/gc, since
1055 // it's an input to soft ref clearing policy.
1056 {
1057 MutexLocker x(Heap_lock);
1058 Universe::update_heap_info_at_gc();
1059 }
1060
1061 // ("weak") refs processing infrastructure initialization
1062 Universe::heap()->post_initialize();
1063
1064 // Initialize performance counters for metaspaces
1065 MetaspaceCounters::initialize_performance_counters();
1066 CompressedClassSpaceCounters::initialize_performance_counters();
1067
1068 MemoryService::add_metaspace_memory_pools();
1069
1070 MemoryService::set_universe_heap(Universe::heap());
1071 #if INCLUDE_CDS
1072 SharedClassUtil::initialize(CHECK_false);
1073 #endif
1074 return true;
1075 }
1076
1077
// Cache the result of ClassLoader::compute_Object_vtable() as the base
// vtable size — NOTE(review): presumably the vtable length of
// java.lang.Object, inferred from the callee's name; confirm at its definition.
1078 void Universe::compute_base_vtable_size() {
1079   _base_vtable_size = ClassLoader::compute_Object_vtable();
1080 }
1081
// Print a heap summary to st under Heap_lock. GCMutexLocker presumably
// tolerates the lock already being held — TODO confirm its semantics.
1082 void Universe::print_on(outputStream* st) {
1083   GCMutexLocker hl(Heap_lock); // Heap_lock might be locked by caller thread.
1084   st->print_cr("Heap");
1085   heap()->print_on(st);
1086 }
1087
1088 void Universe::print_heap_at_SIGBREAK() {
1089 if (PrintHeapAtSIGBREAK) {
1090 print_on(tty);
1091 tty->cr();
1092 tty->flush();
1093 }
1094 }
1095
1096 void Universe::print_heap_before_gc() {
1097 Log(gc, heap) log;
1098 if (log.is_debug()) {
1099 log.debug("Heap before GC invocations=%u (full %u):", heap()->total_collections(), heap()->total_full_collections());
1100 ResourceMark rm;
1101 heap()->print_on(log.debug_stream());
1102 }
1103 }
1104
1105 void Universe::print_heap_after_gc() {
1106 Log(gc, heap) log;
1107 if (log.is_debug()) {
1108 log.debug("Heap after GC invocations=%u (full %u):", heap()->total_collections(), heap()->total_full_collections());
1109 ResourceMark rm;
1110 heap()->print_on(log.debug_stream());
1111 }
1112 }
1113
1114 void Universe::initialize_verify_flags() {
1115 verify_flags = 0;
1116 const char delimiter[] = " ,";
1117
1118 size_t length = strlen(VerifySubSet);
1119 char* subset_list = NEW_C_HEAP_ARRAY(char, length + 1, mtInternal);
1120 strncpy(subset_list, VerifySubSet, length + 1);
1121
1122 char* token = strtok(subset_list, delimiter);
1123 while (token != NULL) {
1124 if (strcmp(token, "threads") == 0) {
1125 verify_flags |= Verify_Threads;
1126 } else if (strcmp(token, "heap") == 0) {
1127 verify_flags |= Verify_Heap;
1128 } else if (strcmp(token, "symbol_table") == 0) {
1129 verify_flags |= Verify_SymbolTable;
1130 } else if (strcmp(token, "string_table") == 0) {
1164 _verify_in_progress = true;
1165
1166 COMPILER2_PRESENT(
1167 assert(!DerivedPointerTable::is_active(),
1168 "DPT should not be active during verification "
1169 "(of thread stacks below)");
1170 )
1171
1172 ResourceMark rm;
1173 HandleMark hm; // Handles created during verification can be zapped
1174 _verify_count++;
1175
1176 FormatBuffer<> title("Verifying %s", prefix);
1177 GCTraceTime(Info, gc, verify) tm(title.buffer());
1178 if (should_verify_subset(Verify_Threads)) {
1179 log_debug(gc, verify)("Threads");
1180 Threads::verify();
1181 }
1182 if (should_verify_subset(Verify_Heap)) {
1183 log_debug(gc, verify)("Heap");
1184 heap()->verify(option);
1185 }
1186 if (should_verify_subset(Verify_SymbolTable)) {
1187 log_debug(gc, verify)("SymbolTable");
1188 SymbolTable::verify();
1189 }
1190 if (should_verify_subset(Verify_StringTable)) {
1191 log_debug(gc, verify)("StringTable");
1192 StringTable::verify();
1193 }
1194 if (should_verify_subset(Verify_CodeCache)) {
1195 {
1196 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1197 log_debug(gc, verify)("CodeCache");
1198 CodeCache::verify();
1199 }
1200 }
1201 if (should_verify_subset(Verify_SystemDictionary)) {
1202 log_debug(gc, verify)("SystemDictionary");
1203 SystemDictionary::verify();
1204 }
1244 while ((mask & diff) != 0)
1245 mask <<= 1;
1246 uintptr_t bits = (min & mask);
1247 assert(bits == (max & mask), "correct mask");
1248 // check an intermediate value between min and max, just to make sure:
1249 assert(bits == ((min + (max-min)/2) & mask), "correct mask");
1250
1251 // require address alignment, too:
1252 mask |= (alignSize - 1);
1253
1254 if (!(_verify_oop_mask == 0 && _verify_oop_bits == (uintptr_t)-1)) {
1255 assert(_verify_oop_mask == mask && _verify_oop_bits == bits, "mask stability");
1256 }
1257 _verify_oop_mask = mask;
1258 _verify_oop_bits = bits;
1259 }
1260
1261 // Oop verification (see MacroAssembler::verify_oop)
1262
// Recompute the oop-verification data from the current reserved heap region
// (every call — calculate_verify_data asserts the result is stable), then
// return the cached mask.
1263 uintptr_t Universe::verify_oop_mask() {
1264   MemRegion m = heap()->reserved_region();
1265   calculate_verify_data(m.start(), m.end());
1266   return _verify_oop_mask;
1267 }
1268
// Same recomputation as verify_oop_mask(), returning the expected bit
// pattern that in-heap oop addresses must show under that mask.
1269 uintptr_t Universe::verify_oop_bits() {
1270   MemRegion m = heap()->reserved_region();
1271   calculate_verify_data(m.start(), m.end());
1272   return _verify_oop_bits;
1273 }
1274
// Mask for the lock bits of the mark word (markOopDesc::lock_mask_in_place).
1275 uintptr_t Universe::verify_mark_mask() {
1276   return markOopDesc::lock_mask_in_place;
1277 }
1278
1279 uintptr_t Universe::verify_mark_bits() {
1280 intptr_t mask = verify_mark_mask();
1281 intptr_t bits = (intptr_t)markOopDesc::prototype();
1282 assert((bits & ~mask) == 0, "no stray header bits");
1283 return bits;
1284 }
1285 #endif // PRODUCT
1286
1287
1288 void Universe::compute_verify_oop_data() {
1289 verify_oop_mask();
1290 verify_oop_bits();
|
60 #include "runtime/atomic.hpp"
61 #include "runtime/commandLineFlagConstraintList.hpp"
62 #include "runtime/deoptimization.hpp"
63 #include "runtime/fprofiler.hpp"
64 #include "runtime/handles.inline.hpp"
65 #include "runtime/init.hpp"
66 #include "runtime/java.hpp"
67 #include "runtime/javaCalls.hpp"
68 #include "runtime/sharedRuntime.hpp"
69 #include "runtime/synchronizer.hpp"
70 #include "runtime/thread.inline.hpp"
71 #include "runtime/timerTrace.hpp"
72 #include "runtime/vm_operations.hpp"
73 #include "services/memoryService.hpp"
74 #include "utilities/copy.hpp"
75 #include "utilities/events.hpp"
76 #include "utilities/hashtable.inline.hpp"
77 #include "utilities/macros.hpp"
78 #include "utilities/ostream.hpp"
79 #include "utilities/preserveException.hpp"
80 #if INCLUDE_CDS
81 #include "classfile/sharedClassUtil.hpp"
82 #endif
83
84 // Known objects
// One array klass per primitive element type, plus _typeArrayKlassObjs as a
// BasicType-indexed table over the same klasses (bound T_VOID+1), and the
// klass for Object[]. The oop fields appear to be the primitive-type
// java.lang.Class mirrors (per HotSpot naming) — they are scanned as roots
// in oops_do() below. All stay NULL until filled in during bootstrap.
85 Klass* Universe::_boolArrayKlassObj = NULL;
86 Klass* Universe::_byteArrayKlassObj = NULL;
87 Klass* Universe::_charArrayKlassObj = NULL;
88 Klass* Universe::_intArrayKlassObj = NULL;
89 Klass* Universe::_shortArrayKlassObj = NULL;
90 Klass* Universe::_longArrayKlassObj = NULL;
91 Klass* Universe::_singleArrayKlassObj = NULL;
92 Klass* Universe::_doubleArrayKlassObj = NULL;
93 Klass* Universe::_typeArrayKlassObjs[T_VOID+1] = { NULL /*, NULL...*/ };
94 Klass* Universe::_objectArrayKlassObj = NULL;
95 oop Universe::_int_mirror = NULL;
96 oop Universe::_float_mirror = NULL;
97 oop Universe::_double_mirror = NULL;
98 oop Universe::_byte_mirror = NULL;
99 oop Universe::_bool_mirror = NULL;
138
139 // These variables are guarded by FullGCALot_lock.
140 debug_only(objArrayOop Universe::_fullgc_alot_dummy_array = NULL;)
141 debug_only(int Universe::_fullgc_alot_dummy_next = 0;)
142
143 // Heap
144 int Universe::_verify_count = 0;
145
146 // Oop verification (see MacroAssembler::verify_oop)
// The (mask, bits) pair computed by calculate_verify_data(); the initial
// values (mask 0, bits all-ones) are the "not yet computed" sentinel that
// calculate_verify_data() tests for before asserting mask stability.
147 uintptr_t Universe::_verify_oop_mask = 0;
148 uintptr_t Universe::_verify_oop_bits = (uintptr_t) -1;
149
150 int Universe::_base_vtable_size = 0;
151 bool Universe::_bootstrapping = false;
152 bool Universe::_module_initialized = false;
153 bool Universe::_fully_initialized = false;
154
// No initializer: zero-initialized anyway by static storage duration. Note
// the _collectedHeap member of the earlier version is gone — the heap is now
// owned by GC::gc() (see initialize_heap()).
155 size_t Universe::_heap_capacity_at_last_gc;
156 size_t Universe::_heap_used_at_last_gc = 0;
157
// Compressed oop/klass encoding state; fields are presumably
// { base, shift, use_implicit_null_checks } — matches the set_narrow_oop_*
// accessors used in initialize_heap(). TODO confirm against NarrowPtrStruct.
158 NarrowPtrStruct Universe::_narrow_oop = { NULL, 0, true };
159 NarrowPtrStruct Universe::_narrow_klass = { NULL, 0, true };
160 address Universe::_narrow_ptrs_base;
161
162 void Universe::basic_type_classes_do(void f(Klass*)) {
163 f(boolArrayKlassObj());
164 f(byteArrayKlassObj());
165 f(charArrayKlassObj());
166 f(intArrayKlassObj());
167 f(shortArrayKlassObj());
168 f(longArrayKlassObj());
169 f(singleArrayKlassObj());
170 f(doubleArrayKlassObj());
171 }
172
173 void Universe::oops_do(OopClosure* f, bool do_all) {
174
175 f->do_oop((oop*) &_int_mirror);
176 f->do_oop((oop*) &_float_mirror);
177 f->do_oop((oop*) &_double_mirror);
615
616 if (_non_oop_bits == 0) {
617 _non_oop_bits = (intptr_t)os::non_memory_address_word() | 1;
618 }
619
620 return (void*)_non_oop_bits;
621 }
622
623 jint universe_init() {
624 assert(!Universe::_fully_initialized, "called after initialize_vtables");
625 guarantee(1 << LogHeapWordSize == sizeof(HeapWord),
626 "LogHeapWordSize is incorrect.");
627 guarantee(sizeof(oop) >= sizeof(HeapWord), "HeapWord larger than oop?");
628 guarantee(sizeof(oop) % sizeof(HeapWord) == 0,
629 "oop size is not not a multiple of HeapWord size");
630
631 TraceTime timer("Genesis", TRACETIME_LOG(Info, startuptime));
632
633 JavaClasses::compute_hard_coded_offsets();
634
635 assert(GC::is_initialized(), "needs to be initialized here");
636 jint status = GC::gc()->initialize_heap();
637 if (status != JNI_OK) {
638 return status;
639 }
640
641 status = Universe::initialize_heap();
642 if (status != JNI_OK) {
643 return status;
644 }
645
646 Metaspace::global_initialize();
647
648 AOTLoader::universe_init();
649
650 // Checks 'AfterMemoryInit' constraints.
651 if (!CommandLineFlagConstraintList::check_constraints(CommandLineFlagConstraint::AfterMemoryInit)) {
652 return JNI_EINVAL;
653 }
654
655 // Create memory for metadata. Must be after initializing heap for
656 // DumpSharedSpaces.
657 ClassLoaderData::init_null_class_loader_data();
658
659 // We have a heap so create the Method* caches before
660 // Metaspace::initialize_shared_spaces() tries to populate them.
661 Universe::_finalizer_register_cache = new LatestMethodCache();
670 // the file (other than the mapped regions) is no longer needed, and
671 // the file is closed. Closing the file does not affect the
672 // currently mapped regions.
673 MetaspaceShared::initialize_shared_spaces();
674 StringTable::create_table();
675 } else {
676 SymbolTable::create_table();
677 StringTable::create_table();
678
679 if (DumpSharedSpaces) {
680 MetaspaceShared::prepare_for_dumping();
681 }
682 }
683 if (strlen(VerifySubSet) > 0) {
684 Universe::initialize_verify_flags();
685 }
686
687 return JNI_OK;
688 }
689
690 // Choose the heap base address and oop encoding mode
691 // when compressed oops are used:
692 // Unscaled - Use 32-bits oops without encoding when
693 // NarrowOopHeapBaseMin + heap_size < 4Gb
694 // ZeroBased - Use zero based compressed oops with encoding when
695 // NarrowOopHeapBaseMin + heap_size < 32Gb
696 // HeapBased - Use compressed oops with heap base + encoding.
697
698 jint Universe::initialize_heap() {
699 jint status = JNI_ERR;
700
701 GC* gc = GC::gc();
702 CollectedHeap* heap = gc->heap();
703 ThreadLocalAllocBuffer::set_max_size(heap->max_tlab_size());
704
705 #ifdef _LP64
706 if (UseCompressedOops) {
707 // Subtract a page because something can get allocated at heap base.
708 // This also makes implicit null checking work, because the
709 // memory+1 page below heap_base needs to cause a signal.
710 // See needs_explicit_null_check.
711 // Only set the heap base for compressed oops because it indicates
712 // compressed oops for pstack code.
713 if ((uint64_t) heap->reserved_region().end() > UnscaledOopHeapMax) {
714 // Didn't reserve heap below 4Gb. Must shift.
715 Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
716 }
717 if ((uint64_t) heap->reserved_region().end() <= OopEncodingHeapMax) {
718 // Did reserve heap below 32Gb. Can use base == 0;
719 Universe::set_narrow_oop_base(0);
720 }
721
722 Universe::set_narrow_ptrs_base(Universe::narrow_oop_base());
723
724 if (log_is_enabled(Info, gc, heap, coops)) {
725 ResourceMark rm;
726 outputStream* logst = Log(gc, heap, coops)::info_stream();
727 Universe::print_compressed_oops_mode(logst);
728 }
729
730 // Tell tests in which mode we run.
731 Arguments::PropertyList_add(new SystemProperty("java.vm.compressedOopsMode",
732 narrow_oop_mode_to_string(narrow_oop_mode()),
733 false));
734 }
735 // Universe::narrow_oop_base() is one page below the heap.
736 assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(heap->base() -
737 os::vm_page_size()) ||
738 Universe::narrow_oop_base() == NULL, "invalid value");
739 assert(Universe::narrow_oop_shift() == LogMinObjAlignmentInBytes ||
740 Universe::narrow_oop_shift() == 0, "invalid value");
741 #endif
742
743 // We will never reach the CATCH below since Exceptions::_throw will cause
744 // the VM to exit if an exception is thrown during initialization
745
746 if (UseTLAB) {
747 assert(heap->supports_tlab_allocation(),
748 "Should support thread-local allocation buffers");
749 ThreadLocalAllocBuffer::startup_initialization();
750 }
751 return JNI_OK;
752 }
753
754 void Universe::print_compressed_oops_mode(outputStream* st) {
755 st->print("Heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
756 p2i(GC::gc()->heap()->base()), GC::gc()->heap()->reserved_region().byte_size()/M);
757
758 st->print(", Compressed Oops mode: %s", narrow_oop_mode_to_string(narrow_oop_mode()));
759
760 if (Universe::narrow_oop_base() != 0) {
761 st->print(": " PTR_FORMAT, p2i(Universe::narrow_oop_base()));
762 }
763
764 if (Universe::narrow_oop_shift() != 0) {
765 st->print(", Oop shift amount: %d", Universe::narrow_oop_shift());
766 }
767
768 if (!Universe::narrow_oop_use_implicit_null_checks()) {
769 st->print(", no protected page in front of the heap");
770 }
771 st->cr();
772 }
773
774 ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
775
776 assert(alignment <= Arguments::conservative_max_heap_alignment(),
800 // Else heap start and base MUST differ, so that NULL can be encoded nonambigous.
801 Universe::set_narrow_oop_base((address)total_rs.compressed_oop_base());
802 }
803
804 return total_rs;
805 }
806
807 vm_exit_during_initialization(
808 err_msg("Could not reserve enough space for " SIZE_FORMAT "KB object heap",
809 total_reserved/K));
810
811 // satisfy compiler
812 ShouldNotReachHere();
813 return ReservedHeapSpace(0, 0, false);
814 }
815
816
817 // It's the caller's responsibility to ensure glitch-freedom
818 // (if required).
819 void Universe::update_heap_info_at_gc() {
820 _heap_capacity_at_last_gc = GC::gc()->heap()->capacity();
821 _heap_used_at_last_gc = GC::gc()->heap()->used();
822 }
823
824
// Human-readable name for a compressed-oops encoding mode (used for logging
// and the java.vm.compressedOopsMode test property).
825 const char* Universe::narrow_oop_mode_to_string(Universe::NARROW_OOP_MODE mode) {
826   switch (mode) {
827   case UnscaledNarrowOop:
828     return "32-bit";
829   case ZeroBasedNarrowOop:
830     return "Zero based";
831   case DisjointBaseNarrowOop:
832     return "Non-zero disjoint base";
833   case HeapBasedNarrowOop:
834     return "Non-zero based";
835   }
836
// Falls out of the switch only for an enumerator not listed above.
837   ShouldNotReachHere();
838   return "";
839 }
840
841
1005 Universe::_preallocated_out_of_memory_error_array = oopFactory::new_objArray(ik, len, CHECK_false);
1006 for (int i=0; i<len; i++) {
1007 oop err = ik->allocate_instance(CHECK_false);
1008 Handle err_h = Handle(THREAD, err);
1009 java_lang_Throwable::allocate_backtrace(err_h, CHECK_false);
1010 Universe::preallocated_out_of_memory_errors()->obj_at_put(i, err_h());
1011 }
1012 Universe::_preallocated_out_of_memory_error_avail_count = (jint)len;
1013 }
1014
1015 Universe::initialize_known_methods(CHECK_false);
1016
1017 // This needs to be done before the first scavenge/gc, since
1018 // it's an input to soft ref clearing policy.
1019 {
1020 MutexLocker x(Heap_lock);
1021 Universe::update_heap_info_at_gc();
1022 }
1023
1024 // ("weak") refs processing infrastructure initialization
1025 GC::gc()->heap()->post_initialize();
1026
1027 // Initialize performance counters for metaspaces
1028 MetaspaceCounters::initialize_performance_counters();
1029 CompressedClassSpaceCounters::initialize_performance_counters();
1030
1031 MemoryService::add_metaspace_memory_pools();
1032
1033 MemoryService::set_universe_heap(GC::gc()->heap());
1034 #if INCLUDE_CDS
1035 SharedClassUtil::initialize(CHECK_false);
1036 #endif
1037 return true;
1038 }
1039
1040
// Cache the result of ClassLoader::compute_Object_vtable() as the base
// vtable size — NOTE(review): presumably the vtable length of
// java.lang.Object, inferred from the callee's name; confirm at its definition.
1041 void Universe::compute_base_vtable_size() {
1042   _base_vtable_size = ClassLoader::compute_Object_vtable();
1043 }
1044
// Print a heap summary to st under Heap_lock. GCMutexLocker presumably
// tolerates the lock already being held — TODO confirm its semantics.
1045 void Universe::print_on(outputStream* st) {
1046   GCMutexLocker hl(Heap_lock); // Heap_lock might be locked by caller thread.
1047   st->print_cr("Heap");
1048   GC::gc()->heap()->print_on(st);
1049 }
1050
1051 void Universe::print_heap_at_SIGBREAK() {
1052 if (PrintHeapAtSIGBREAK) {
1053 print_on(tty);
1054 tty->cr();
1055 tty->flush();
1056 }
1057 }
1058
1059 void Universe::print_heap_before_gc() {
1060 Log(gc, heap) log;
1061 if (log.is_debug()) {
1062 log.debug("Heap before GC invocations=%u (full %u):", GC::gc()->heap()->total_collections(), GC::gc()->heap()->total_full_collections());
1063 ResourceMark rm;
1064 GC::gc()->heap()->print_on(log.debug_stream());
1065 }
1066 }
1067
1068 void Universe::print_heap_after_gc() {
1069 Log(gc, heap) log;
1070 if (log.is_debug()) {
1071 log.debug("Heap after GC invocations=%u (full %u):", GC::gc()->heap()->total_collections(), GC::gc()->heap()->total_full_collections());
1072 ResourceMark rm;
1073 GC::gc()->heap()->print_on(log.debug_stream());
1074 }
1075 }
1076
1077 void Universe::initialize_verify_flags() {
1078 verify_flags = 0;
1079 const char delimiter[] = " ,";
1080
1081 size_t length = strlen(VerifySubSet);
1082 char* subset_list = NEW_C_HEAP_ARRAY(char, length + 1, mtInternal);
1083 strncpy(subset_list, VerifySubSet, length + 1);
1084
1085 char* token = strtok(subset_list, delimiter);
1086 while (token != NULL) {
1087 if (strcmp(token, "threads") == 0) {
1088 verify_flags |= Verify_Threads;
1089 } else if (strcmp(token, "heap") == 0) {
1090 verify_flags |= Verify_Heap;
1091 } else if (strcmp(token, "symbol_table") == 0) {
1092 verify_flags |= Verify_SymbolTable;
1093 } else if (strcmp(token, "string_table") == 0) {
1127 _verify_in_progress = true;
1128
1129 COMPILER2_PRESENT(
1130 assert(!DerivedPointerTable::is_active(),
1131 "DPT should not be active during verification "
1132 "(of thread stacks below)");
1133 )
1134
1135 ResourceMark rm;
1136 HandleMark hm; // Handles created during verification can be zapped
1137 _verify_count++;
1138
1139 FormatBuffer<> title("Verifying %s", prefix);
1140 GCTraceTime(Info, gc, verify) tm(title.buffer());
1141 if (should_verify_subset(Verify_Threads)) {
1142 log_debug(gc, verify)("Threads");
1143 Threads::verify();
1144 }
1145 if (should_verify_subset(Verify_Heap)) {
1146 log_debug(gc, verify)("Heap");
1147 GC::gc()->heap()->verify(option);
1148 }
1149 if (should_verify_subset(Verify_SymbolTable)) {
1150 log_debug(gc, verify)("SymbolTable");
1151 SymbolTable::verify();
1152 }
1153 if (should_verify_subset(Verify_StringTable)) {
1154 log_debug(gc, verify)("StringTable");
1155 StringTable::verify();
1156 }
1157 if (should_verify_subset(Verify_CodeCache)) {
1158 {
1159 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1160 log_debug(gc, verify)("CodeCache");
1161 CodeCache::verify();
1162 }
1163 }
1164 if (should_verify_subset(Verify_SystemDictionary)) {
1165 log_debug(gc, verify)("SystemDictionary");
1166 SystemDictionary::verify();
1167 }
1207 while ((mask & diff) != 0)
1208 mask <<= 1;
1209 uintptr_t bits = (min & mask);
1210 assert(bits == (max & mask), "correct mask");
1211 // check an intermediate value between min and max, just to make sure:
1212 assert(bits == ((min + (max-min)/2) & mask), "correct mask");
1213
1214 // require address alignment, too:
1215 mask |= (alignSize - 1);
1216
1217 if (!(_verify_oop_mask == 0 && _verify_oop_bits == (uintptr_t)-1)) {
1218 assert(_verify_oop_mask == mask && _verify_oop_bits == bits, "mask stability");
1219 }
1220 _verify_oop_mask = mask;
1221 _verify_oop_bits = bits;
1222 }
1223
1224 // Oop verification (see MacroAssembler::verify_oop)
1225
// Recompute the oop-verification data from the current reserved heap region
// (every call — calculate_verify_data asserts the result is stable), then
// return the cached mask.
1226 uintptr_t Universe::verify_oop_mask() {
1227   MemRegion m = GC::gc()->heap()->reserved_region();
1228   calculate_verify_data(m.start(), m.end());
1229   return _verify_oop_mask;
1230 }
1231
// Same recomputation as verify_oop_mask(), returning the expected bit
// pattern that in-heap oop addresses must show under that mask.
1232 uintptr_t Universe::verify_oop_bits() {
1233   MemRegion m = GC::gc()->heap()->reserved_region();
1234   calculate_verify_data(m.start(), m.end());
1235   return _verify_oop_bits;
1236 }
1237
// Mask for the lock bits of the mark word (markOopDesc::lock_mask_in_place).
1238 uintptr_t Universe::verify_mark_mask() {
1239   return markOopDesc::lock_mask_in_place;
1240 }
1241
1242 uintptr_t Universe::verify_mark_bits() {
1243 intptr_t mask = verify_mark_mask();
1244 intptr_t bits = (intptr_t)markOopDesc::prototype();
1245 assert((bits & ~mask) == 0, "no stray header bits");
1246 return bits;
1247 }
1248 #endif // PRODUCT
1249
1250
1251 void Universe::compute_verify_oop_data() {
1252 verify_oop_mask();
1253 verify_oop_bits();
|