59 #include "runtime/atomic.inline.hpp"
60 #include "runtime/commandLineFlagConstraintList.hpp"
61 #include "runtime/deoptimization.hpp"
62 #include "runtime/fprofiler.hpp"
63 #include "runtime/handles.inline.hpp"
64 #include "runtime/init.hpp"
65 #include "runtime/java.hpp"
66 #include "runtime/javaCalls.hpp"
67 #include "runtime/sharedRuntime.hpp"
68 #include "runtime/synchronizer.hpp"
69 #include "runtime/thread.inline.hpp"
70 #include "runtime/timerTrace.hpp"
71 #include "runtime/vm_operations.hpp"
72 #include "services/memoryService.hpp"
73 #include "utilities/copy.hpp"
74 #include "utilities/events.hpp"
75 #include "utilities/hashtable.inline.hpp"
76 #include "utilities/macros.hpp"
77 #include "utilities/ostream.hpp"
78 #include "utilities/preserveException.hpp"
79 #if INCLUDE_ALL_GCS
80 #include "gc/cms/cmsCollectorPolicy.hpp"
81 #include "gc/g1/g1CollectedHeap.inline.hpp"
82 #include "gc/g1/g1CollectorPolicy.hpp"
83 #include "gc/parallel/parallelScavengeHeap.hpp"
84 #include "gc/shared/adaptiveSizePolicy.hpp"
85 #endif // INCLUDE_ALL_GCS
86 #if INCLUDE_CDS
87 #include "classfile/sharedClassUtil.hpp"
88 #endif
89
// Known objects
// Static Universe state: all klass/mirror slots start out NULL and are
// filled in during VM bootstrap (see universe_init() below).
Klass* Universe::_boolArrayKlassObj = NULL;
Klass* Universe::_byteArrayKlassObj = NULL;
Klass* Universe::_charArrayKlassObj = NULL;
Klass* Universe::_intArrayKlassObj = NULL;
Klass* Universe::_shortArrayKlassObj = NULL;
Klass* Universe::_longArrayKlassObj = NULL;
Klass* Universe::_singleArrayKlassObj = NULL;
Klass* Universe::_doubleArrayKlassObj = NULL;
Klass* Universe::_typeArrayKlassObjs[T_VOID+1] = { NULL /*, NULL...*/ };
Klass* Universe::_objectArrayKlassObj = NULL;
oop Universe::_int_mirror = NULL;
oop Universe::_float_mirror = NULL;
oop Universe::_double_mirror = NULL;
oop Universe::_byte_mirror = NULL;
oop Universe::_bool_mirror = NULL;

// These variables are guarded by FullGCALot_lock.
debug_only(objArrayOop Universe::_fullgc_alot_dummy_array = NULL;)
debug_only(int Universe::_fullgc_alot_dummy_next = 0;)

// Heap
int Universe::_verify_count = 0;

// Oop verification (see MacroAssembler::verify_oop)
uintptr_t Universe::_verify_oop_mask = 0;
uintptr_t Universe::_verify_oop_bits = (uintptr_t) -1;

int Universe::_base_vtable_size = 0;
bool Universe::_bootstrapping = false;
bool Universe::_module_initialized = false;
bool Universe::_fully_initialized = false;

// Zero-initialized by static storage duration (no explicit initializer).
size_t Universe::_heap_capacity_at_last_gc;
size_t Universe::_heap_used_at_last_gc = 0;

CollectedHeap* Universe::_collectedHeap = NULL;

// Compressed oop/klass encoding state: { base, shift, implicit-null-check flag }
// (see Universe::narrow_oop_use_implicit_null_checks()).
NarrowPtrStruct Universe::_narrow_oop = { NULL, 0, true };
NarrowPtrStruct Universe::_narrow_klass = { NULL, 0, true };
address Universe::_narrow_ptrs_base;
168
169 void Universe::basic_type_classes_do(void f(Klass*)) {
170 f(boolArrayKlassObj());
171 f(byteArrayKlassObj());
172 f(charArrayKlassObj());
173 f(intArrayKlassObj());
174 f(shortArrayKlassObj());
175 f(longArrayKlassObj());
176 f(singleArrayKlassObj());
177 f(doubleArrayKlassObj());
178 }
179
180 void Universe::oops_do(OopClosure* f, bool do_all) {
181
182 f->do_oop((oop*) &_int_mirror);
183 f->do_oop((oop*) &_float_mirror);
184 f->do_oop((oop*) &_double_mirror);
614
615 if (_non_oop_bits == 0) {
616 _non_oop_bits = (intptr_t)os::non_memory_address_word() | 1;
617 }
618
619 return (void*)_non_oop_bits;
620 }
621
622 jint universe_init() {
623 assert(!Universe::_fully_initialized, "called after initialize_vtables");
624 guarantee(1 << LogHeapWordSize == sizeof(HeapWord),
625 "LogHeapWordSize is incorrect.");
626 guarantee(sizeof(oop) >= sizeof(HeapWord), "HeapWord larger than oop?");
627 guarantee(sizeof(oop) % sizeof(HeapWord) == 0,
628 "oop size is not not a multiple of HeapWord size");
629
630 TraceTime timer("Genesis", TRACETIME_LOG(Info, startuptime));
631
632 JavaClasses::compute_hard_coded_offsets();
633
634 jint status = Universe::initialize_heap();
635 if (status != JNI_OK) {
636 return status;
637 }
638
639 Metaspace::global_initialize();
640
641 // Checks 'AfterMemoryInit' constraints.
642 if (!CommandLineFlagConstraintList::check_constraints(CommandLineFlagConstraint::AfterMemoryInit)) {
643 return JNI_EINVAL;
644 }
645
646 // Create memory for metadata. Must be after initializing heap for
647 // DumpSharedSpaces.
648 ClassLoaderData::init_null_class_loader_data();
649
650 // We have a heap so create the Method* caches before
651 // Metaspace::initialize_shared_spaces() tries to populate them.
652 Universe::_finalizer_register_cache = new LatestMethodCache();
653 Universe::_loader_addClass_cache = new LatestMethodCache();
654 Universe::_pd_implies_cache = new LatestMethodCache();
661 // the file (other than the mapped regions) is no longer needed, and
662 // the file is closed. Closing the file does not affect the
663 // currently mapped regions.
664 MetaspaceShared::initialize_shared_spaces();
665 StringTable::create_table();
666 } else {
667 SymbolTable::create_table();
668 StringTable::create_table();
669
670 if (DumpSharedSpaces) {
671 MetaspaceShared::prepare_for_dumping();
672 }
673 }
674 if (strlen(VerifySubSet) > 0) {
675 Universe::initialize_verify_flags();
676 }
677
678 return JNI_OK;
679 }
680
// Instantiate the CollectedHeap selected by the GC command-line flags.
// Does not initialize the heap; callers do that separately.
CollectedHeap* Universe::create_heap() {
  assert(_collectedHeap == NULL, "Heap already created");
#if !INCLUDE_ALL_GCS
  // Minimal build: only the serial collector is compiled in, so any other
  // collector flag is a hard startup error.
  if (UseParallelGC) {
    fatal("UseParallelGC not supported in this VM.");
  } else if (UseG1GC) {
    fatal("UseG1GC not supported in this VM.");
  } else if (UseConcMarkSweepGC) {
    fatal("UseConcMarkSweepGC not supported in this VM.");
#else
  if (UseParallelGC) {
    return Universe::create_heap_with_policy<ParallelScavengeHeap, GenerationSizer>();
  } else if (UseG1GC) {
    return Universe::create_heap_with_policy<G1CollectedHeap, G1CollectorPolicy>();
  } else if (UseConcMarkSweepGC) {
    return Universe::create_heap_with_policy<GenCollectedHeap, ConcurrentMarkSweepPolicy>();
#endif
  } else if (UseSerialGC) {
    return Universe::create_heap_with_policy<GenCollectedHeap, MarkSweepPolicy>();
  }

  ShouldNotReachHere();
  return NULL;
}
705
706 // Choose the heap base address and oop encoding mode
707 // when compressed oops are used:
708 // Unscaled - Use 32-bits oops without encoding when
709 // NarrowOopHeapBaseMin + heap_size < 4Gb
710 // ZeroBased - Use zero based compressed oops with encoding when
711 // NarrowOopHeapBaseMin + heap_size < 32Gb
712 // HeapBased - Use compressed oops with heap base + encoding.
713
714 jint Universe::initialize_heap() {
715 jint status = JNI_ERR;
716
717 _collectedHeap = create_heap_ext();
718 if (_collectedHeap == NULL) {
719 _collectedHeap = create_heap();
720 }
721
722 status = _collectedHeap->initialize();
723 if (status != JNI_OK) {
724 return status;
725 }
726 log_info(gc)("Using %s", _collectedHeap->name());
727
728 ThreadLocalAllocBuffer::set_max_size(Universe::heap()->max_tlab_size());
729
730 #ifdef _LP64
731 if (UseCompressedOops) {
732 // Subtract a page because something can get allocated at heap base.
733 // This also makes implicit null checking work, because the
734 // memory+1 page below heap_base needs to cause a signal.
735 // See needs_explicit_null_check.
736 // Only set the heap base for compressed oops because it indicates
737 // compressed oops for pstack code.
738 if ((uint64_t)Universe::heap()->reserved_region().end() > UnscaledOopHeapMax) {
739 // Didn't reserve heap below 4Gb. Must shift.
740 Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
741 }
742 if ((uint64_t)Universe::heap()->reserved_region().end() <= OopEncodingHeapMax) {
743 // Did reserve heap below 32Gb. Can use base == 0;
744 Universe::set_narrow_oop_base(0);
745 }
746
747 Universe::set_narrow_ptrs_base(Universe::narrow_oop_base());
748
749 if (log_is_enabled(Info, gc, heap, coops)) {
750 ResourceMark rm;
751 outputStream* logst = Log(gc, heap, coops)::info_stream();
752 Universe::print_compressed_oops_mode(logst);
753 }
754
755 // Tell tests in which mode we run.
756 Arguments::PropertyList_add(new SystemProperty("java.vm.compressedOopsMode",
757 narrow_oop_mode_to_string(narrow_oop_mode()),
758 false));
759 }
760 // Universe::narrow_oop_base() is one page below the heap.
761 assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() -
762 os::vm_page_size()) ||
763 Universe::narrow_oop_base() == NULL, "invalid value");
764 assert(Universe::narrow_oop_shift() == LogMinObjAlignmentInBytes ||
765 Universe::narrow_oop_shift() == 0, "invalid value");
766 #endif
767
768 // We will never reach the CATCH below since Exceptions::_throw will cause
769 // the VM to exit if an exception is thrown during initialization
770
771 if (UseTLAB) {
772 assert(Universe::heap()->supports_tlab_allocation(),
773 "Should support thread-local allocation buffers");
774 ThreadLocalAllocBuffer::startup_initialization();
775 }
776 return JNI_OK;
777 }
778
// Print a one-line summary of the heap location and the compressed-oop
// encoding (mode, base, shift, implicit-null-check availability) to st.
void Universe::print_compressed_oops_mode(outputStream* st) {
  st->print("Heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
            p2i(Universe::heap()->base()), Universe::heap()->reserved_region().byte_size()/M);

  st->print(", Compressed Oops mode: %s", narrow_oop_mode_to_string(narrow_oop_mode()));

  // Base and shift are only interesting when non-trivial.
  if (Universe::narrow_oop_base() != 0) {
    st->print(": " PTR_FORMAT, p2i(Universe::narrow_oop_base()));
  }

  if (Universe::narrow_oop_shift() != 0) {
    st->print(", Oop shift amount: %d", Universe::narrow_oop_shift());
  }

  if (!Universe::narrow_oop_use_implicit_null_checks()) {
    st->print(", no protected page in front of the heap");
  }
  st->cr();
}
798
ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {

  // Reserve the Java heap at the requested size/alignment; if reservation
  // fails the VM exits below.  Alignment may not exceed the conservative
  // maximum computed from the command line.
  assert(alignment <= Arguments::conservative_max_heap_alignment(),
  // Else heap start and base MUST differ, so that NULL can be encoded nonambigous.
  Universe::set_narrow_oop_base((address)total_rs.compressed_oop_base());
  }

  return total_rs;
  }

  // Reservation failed: report and exit during initialization.
  vm_exit_during_initialization(
    err_msg("Could not reserve enough space for " SIZE_FORMAT "KB object heap",
            total_reserved/K));

  // satisfy compiler
  ShouldNotReachHere();
  return ReservedHeapSpace(0, 0, false);
}
840
841
842 // It's the caller's responsibility to ensure glitch-freedom
843 // (if required).
844 void Universe::update_heap_info_at_gc() {
845 _heap_capacity_at_last_gc = heap()->capacity();
846 _heap_used_at_last_gc = heap()->used();
847 }
848
849
850 const char* Universe::narrow_oop_mode_to_string(Universe::NARROW_OOP_MODE mode) {
851 switch (mode) {
852 case UnscaledNarrowOop:
853 return "32-bit";
854 case ZeroBasedNarrowOop:
855 return "Zero based";
856 case DisjointBaseNarrowOop:
857 return "Non-zero disjoint base";
858 case HeapBasedNarrowOop:
859 return "Non-zero based";
860 }
861
862 ShouldNotReachHere();
863 return "";
864 }
865
866
1032 Universe::_preallocated_out_of_memory_error_array = oopFactory::new_objArray(k_h(), len, CHECK_false);
1033 for (int i=0; i<len; i++) {
1034 oop err = k_h->allocate_instance(CHECK_false);
1035 Handle err_h = Handle(THREAD, err);
1036 java_lang_Throwable::allocate_backtrace(err_h, CHECK_false);
1037 Universe::preallocated_out_of_memory_errors()->obj_at_put(i, err_h());
1038 }
1039 Universe::_preallocated_out_of_memory_error_avail_count = (jint)len;
1040 }
1041
1042 Universe::initialize_known_methods(CHECK_false);
1043
1044 // This needs to be done before the first scavenge/gc, since
1045 // it's an input to soft ref clearing policy.
1046 {
1047 MutexLocker x(Heap_lock);
1048 Universe::update_heap_info_at_gc();
1049 }
1050
1051 // ("weak") refs processing infrastructure initialization
1052 Universe::heap()->post_initialize();
1053
1054 // Initialize performance counters for metaspaces
1055 MetaspaceCounters::initialize_performance_counters();
1056 CompressedClassSpaceCounters::initialize_performance_counters();
1057
1058 MemoryService::add_metaspace_memory_pools();
1059
1060 MemoryService::set_universe_heap(Universe::heap());
1061 #if INCLUDE_CDS
1062 SharedClassUtil::initialize(CHECK_false);
1063 #endif
1064 return true;
1065 }
1066
1067
// Cache the vtable size of java.lang.Object as computed by the class loader.
void Universe::compute_base_vtable_size() {
  _base_vtable_size = ClassLoader::compute_Object_vtable();
}
1071
// Print a heap summary to st while holding the Heap_lock.
void Universe::print_on(outputStream* st) {
  GCMutexLocker hl(Heap_lock); // Heap_lock might be locked by caller thread.
  st->print_cr("Heap");
  heap()->print_on(st);
}
1077
1078 void Universe::print_heap_at_SIGBREAK() {
1079 if (PrintHeapAtSIGBREAK) {
1080 print_on(tty);
1081 tty->cr();
1082 tty->flush();
1083 }
1084 }
1085
// Debug-level dump of the heap before a collection, tagged with the
// collection counters.  No-op unless gc+heap=debug logging is enabled.
void Universe::print_heap_before_gc() {
  Log(gc, heap) log;
  if (log.is_debug()) {
    log.debug("Heap before GC invocations=%u (full %u):", heap()->total_collections(), heap()->total_full_collections());
    ResourceMark rm;
    heap()->print_on(log.debug_stream());
  }
}
1094
// Debug-level dump of the heap after a collection; mirrors
// print_heap_before_gc() above.
void Universe::print_heap_after_gc() {
  Log(gc, heap) log;
  if (log.is_debug()) {
    log.debug("Heap after GC invocations=%u (full %u):", heap()->total_collections(), heap()->total_full_collections());
    ResourceMark rm;
    heap()->print_on(log.debug_stream());
  }
}
1103
1104 void Universe::initialize_verify_flags() {
1105 verify_flags = 0;
1106 const char delimiter[] = " ,";
1107
1108 size_t length = strlen(VerifySubSet);
1109 char* subset_list = NEW_C_HEAP_ARRAY(char, length + 1, mtInternal);
1110 strncpy(subset_list, VerifySubSet, length + 1);
1111
1112 char* token = strtok(subset_list, delimiter);
1113 while (token != NULL) {
1114 if (strcmp(token, "threads") == 0) {
1115 verify_flags |= Verify_Threads;
1116 } else if (strcmp(token, "heap") == 0) {
1117 verify_flags |= Verify_Heap;
1118 } else if (strcmp(token, "symbol_table") == 0) {
1119 verify_flags |= Verify_SymbolTable;
1120 } else if (strcmp(token, "string_table") == 0) {
1156 _verify_in_progress = true;
1157
1158 COMPILER2_PRESENT(
1159 assert(!DerivedPointerTable::is_active(),
1160 "DPT should not be active during verification "
1161 "(of thread stacks below)");
1162 )
1163
1164 ResourceMark rm;
1165 HandleMark hm; // Handles created during verification can be zapped
1166 _verify_count++;
1167
1168 FormatBuffer<> title("Verifying %s", prefix);
1169 GCTraceTime(Info, gc, verify) tm(title.buffer());
1170 if (should_verify_subset(Verify_Threads)) {
1171 log_debug(gc, verify)("Threads");
1172 Threads::verify();
1173 }
1174 if (should_verify_subset(Verify_Heap)) {
1175 log_debug(gc, verify)("Heap");
1176 heap()->verify(option);
1177 }
1178 if (should_verify_subset(Verify_SymbolTable)) {
1179 log_debug(gc, verify)("SymbolTable");
1180 SymbolTable::verify();
1181 }
1182 if (should_verify_subset(Verify_StringTable)) {
1183 log_debug(gc, verify)("StringTable");
1184 StringTable::verify();
1185 }
1186 if (should_verify_subset(Verify_CodeCache)) {
1187 {
1188 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1189 log_debug(gc, verify)("CodeCache");
1190 CodeCache::verify();
1191 }
1192 }
1193 if (should_verify_subset(Verify_SystemDictionary)) {
1194 log_debug(gc, verify)("SystemDictionary");
1195 SystemDictionary::verify();
1196 }
1240 while ((mask & diff) != 0)
1241 mask <<= 1;
1242 uintptr_t bits = (min & mask);
1243 assert(bits == (max & mask), "correct mask");
1244 // check an intermediate value between min and max, just to make sure:
1245 assert(bits == ((min + (max-min)/2) & mask), "correct mask");
1246
1247 // require address alignment, too:
1248 mask |= (alignSize - 1);
1249
1250 if (!(_verify_oop_mask == 0 && _verify_oop_bits == (uintptr_t)-1)) {
1251 assert(_verify_oop_mask == mask && _verify_oop_bits == bits, "mask stability");
1252 }
1253 _verify_oop_mask = mask;
1254 _verify_oop_bits = bits;
1255 }
1256
1257 // Oop verification (see MacroAssembler::verify_oop)
1258
1259 uintptr_t Universe::verify_oop_mask() {
1260 MemRegion m = heap()->reserved_region();
1261 calculate_verify_data(m.start(), m.end());
1262 return _verify_oop_mask;
1263 }
1264
1265 uintptr_t Universe::verify_oop_bits() {
1266 MemRegion m = heap()->reserved_region();
1267 calculate_verify_data(m.start(), m.end());
1268 return _verify_oop_bits;
1269 }
1270
// Mask selecting the lock bits of the mark word, used by oop verification.
uintptr_t Universe::verify_mark_mask() {
  return markOopDesc::lock_mask_in_place;
}
1274
// Expected bit pattern (the prototype mark word) within the lock-bit mask.
uintptr_t Universe::verify_mark_bits() {
  intptr_t mask = verify_mark_mask();
  intptr_t bits = (intptr_t)markOopDesc::prototype();
  assert((bits & ~mask) == 0, "no stray header bits");
  return bits;
}
1281 #endif // PRODUCT
1282
1283
1284 void Universe::compute_verify_oop_data() {
1285 verify_oop_mask();
1286 verify_oop_bits();
|
59 #include "runtime/atomic.inline.hpp"
60 #include "runtime/commandLineFlagConstraintList.hpp"
61 #include "runtime/deoptimization.hpp"
62 #include "runtime/fprofiler.hpp"
63 #include "runtime/handles.inline.hpp"
64 #include "runtime/init.hpp"
65 #include "runtime/java.hpp"
66 #include "runtime/javaCalls.hpp"
67 #include "runtime/sharedRuntime.hpp"
68 #include "runtime/synchronizer.hpp"
69 #include "runtime/thread.inline.hpp"
70 #include "runtime/timerTrace.hpp"
71 #include "runtime/vm_operations.hpp"
72 #include "services/memoryService.hpp"
73 #include "utilities/copy.hpp"
74 #include "utilities/events.hpp"
75 #include "utilities/hashtable.inline.hpp"
76 #include "utilities/macros.hpp"
77 #include "utilities/ostream.hpp"
78 #include "utilities/preserveException.hpp"
79 #if INCLUDE_CDS
80 #include "classfile/sharedClassUtil.hpp"
81 #endif
82
// Known objects
// Static Universe state: all klass/mirror slots start out NULL and are
// filled in during VM bootstrap (see universe_init() below).
Klass* Universe::_boolArrayKlassObj = NULL;
Klass* Universe::_byteArrayKlassObj = NULL;
Klass* Universe::_charArrayKlassObj = NULL;
Klass* Universe::_intArrayKlassObj = NULL;
Klass* Universe::_shortArrayKlassObj = NULL;
Klass* Universe::_longArrayKlassObj = NULL;
Klass* Universe::_singleArrayKlassObj = NULL;
Klass* Universe::_doubleArrayKlassObj = NULL;
Klass* Universe::_typeArrayKlassObjs[T_VOID+1] = { NULL /*, NULL...*/ };
Klass* Universe::_objectArrayKlassObj = NULL;
oop Universe::_int_mirror = NULL;
oop Universe::_float_mirror = NULL;
oop Universe::_double_mirror = NULL;
oop Universe::_byte_mirror = NULL;
oop Universe::_bool_mirror = NULL;

// These variables are guarded by FullGCALot_lock.
debug_only(objArrayOop Universe::_fullgc_alot_dummy_array = NULL;)
debug_only(int Universe::_fullgc_alot_dummy_next = 0;)

// Heap
int Universe::_verify_count = 0;

// Oop verification (see MacroAssembler::verify_oop)
uintptr_t Universe::_verify_oop_mask = 0;
uintptr_t Universe::_verify_oop_bits = (uintptr_t) -1;

int Universe::_base_vtable_size = 0;
bool Universe::_bootstrapping = false;
bool Universe::_module_initialized = false;
bool Universe::_fully_initialized = false;

// Zero-initialized by static storage duration (no explicit initializer).
size_t Universe::_heap_capacity_at_last_gc;
size_t Universe::_heap_used_at_last_gc = 0;

// Compressed oop/klass encoding state: { base, shift, implicit-null-check flag }
// (see Universe::narrow_oop_use_implicit_null_checks()).
NarrowPtrStruct Universe::_narrow_oop = { NULL, 0, true };
NarrowPtrStruct Universe::_narrow_klass = { NULL, 0, true };
address Universe::_narrow_ptrs_base;
159
160 void Universe::basic_type_classes_do(void f(Klass*)) {
161 f(boolArrayKlassObj());
162 f(byteArrayKlassObj());
163 f(charArrayKlassObj());
164 f(intArrayKlassObj());
165 f(shortArrayKlassObj());
166 f(longArrayKlassObj());
167 f(singleArrayKlassObj());
168 f(doubleArrayKlassObj());
169 }
170
171 void Universe::oops_do(OopClosure* f, bool do_all) {
172
173 f->do_oop((oop*) &_int_mirror);
174 f->do_oop((oop*) &_float_mirror);
175 f->do_oop((oop*) &_double_mirror);
605
606 if (_non_oop_bits == 0) {
607 _non_oop_bits = (intptr_t)os::non_memory_address_word() | 1;
608 }
609
610 return (void*)_non_oop_bits;
611 }
612
613 jint universe_init() {
614 assert(!Universe::_fully_initialized, "called after initialize_vtables");
615 guarantee(1 << LogHeapWordSize == sizeof(HeapWord),
616 "LogHeapWordSize is incorrect.");
617 guarantee(sizeof(oop) >= sizeof(HeapWord), "HeapWord larger than oop?");
618 guarantee(sizeof(oop) % sizeof(HeapWord) == 0,
619 "oop size is not not a multiple of HeapWord size");
620
621 TraceTime timer("Genesis", TRACETIME_LOG(Info, startuptime));
622
623 JavaClasses::compute_hard_coded_offsets();
624
625 assert(GC::is_initialized(), "needs to be initialized here");
626 jint status = GC::gc()->initialize_heap();
627 if (status != JNI_OK) {
628 return status;
629 }
630
631 status = Universe::initialize_heap();
632 if (status != JNI_OK) {
633 return status;
634 }
635
636 Metaspace::global_initialize();
637
638 // Checks 'AfterMemoryInit' constraints.
639 if (!CommandLineFlagConstraintList::check_constraints(CommandLineFlagConstraint::AfterMemoryInit)) {
640 return JNI_EINVAL;
641 }
642
643 // Create memory for metadata. Must be after initializing heap for
644 // DumpSharedSpaces.
645 ClassLoaderData::init_null_class_loader_data();
646
647 // We have a heap so create the Method* caches before
648 // Metaspace::initialize_shared_spaces() tries to populate them.
649 Universe::_finalizer_register_cache = new LatestMethodCache();
650 Universe::_loader_addClass_cache = new LatestMethodCache();
651 Universe::_pd_implies_cache = new LatestMethodCache();
658 // the file (other than the mapped regions) is no longer needed, and
659 // the file is closed. Closing the file does not affect the
660 // currently mapped regions.
661 MetaspaceShared::initialize_shared_spaces();
662 StringTable::create_table();
663 } else {
664 SymbolTable::create_table();
665 StringTable::create_table();
666
667 if (DumpSharedSpaces) {
668 MetaspaceShared::prepare_for_dumping();
669 }
670 }
671 if (strlen(VerifySubSet) > 0) {
672 Universe::initialize_verify_flags();
673 }
674
675 return JNI_OK;
676 }
677
678 // Choose the heap base address and oop encoding mode
679 // when compressed oops are used:
680 // Unscaled - Use 32-bits oops without encoding when
681 // NarrowOopHeapBaseMin + heap_size < 4Gb
682 // ZeroBased - Use zero based compressed oops with encoding when
683 // NarrowOopHeapBaseMin + heap_size < 32Gb
684 // HeapBased - Use compressed oops with heap base + encoding.
685
686 jint Universe::initialize_heap() {
687 jint status = JNI_ERR;
688
689 GC* gc = GC::gc();
690 CollectedHeap* heap = gc->heap();
691 ThreadLocalAllocBuffer::set_max_size(heap->max_tlab_size());
692
693 #ifdef _LP64
694 if (UseCompressedOops) {
695 // Subtract a page because something can get allocated at heap base.
696 // This also makes implicit null checking work, because the
697 // memory+1 page below heap_base needs to cause a signal.
698 // See needs_explicit_null_check.
699 // Only set the heap base for compressed oops because it indicates
700 // compressed oops for pstack code.
701 if ((uint64_t) heap->reserved_region().end() > UnscaledOopHeapMax) {
702 // Didn't reserve heap below 4Gb. Must shift.
703 Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
704 }
705 if ((uint64_t) heap->reserved_region().end() <= OopEncodingHeapMax) {
706 // Did reserve heap below 32Gb. Can use base == 0;
707 Universe::set_narrow_oop_base(0);
708 }
709
710 Universe::set_narrow_ptrs_base(Universe::narrow_oop_base());
711
712 if (log_is_enabled(Info, gc, heap, coops)) {
713 ResourceMark rm;
714 outputStream* logst = Log(gc, heap, coops)::info_stream();
715 Universe::print_compressed_oops_mode(logst);
716 }
717
718 // Tell tests in which mode we run.
719 Arguments::PropertyList_add(new SystemProperty("java.vm.compressedOopsMode",
720 narrow_oop_mode_to_string(narrow_oop_mode()),
721 false));
722 }
723 // Universe::narrow_oop_base() is one page below the heap.
724 assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(heap->base() -
725 os::vm_page_size()) ||
726 Universe::narrow_oop_base() == NULL, "invalid value");
727 assert(Universe::narrow_oop_shift() == LogMinObjAlignmentInBytes ||
728 Universe::narrow_oop_shift() == 0, "invalid value");
729 #endif
730
731 // We will never reach the CATCH below since Exceptions::_throw will cause
732 // the VM to exit if an exception is thrown during initialization
733
734 if (UseTLAB) {
735 assert(heap->supports_tlab_allocation(),
736 "Should support thread-local allocation buffers");
737 ThreadLocalAllocBuffer::startup_initialization();
738 }
739 return JNI_OK;
740 }
741
742 void Universe::print_compressed_oops_mode(outputStream* st) {
743 st->print("Heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
744 p2i(GC::gc()->heap()->base()), GC::gc()->heap()->reserved_region().byte_size()/M);
745
746 st->print(", Compressed Oops mode: %s", narrow_oop_mode_to_string(narrow_oop_mode()));
747
748 if (Universe::narrow_oop_base() != 0) {
749 st->print(": " PTR_FORMAT, p2i(Universe::narrow_oop_base()));
750 }
751
752 if (Universe::narrow_oop_shift() != 0) {
753 st->print(", Oop shift amount: %d", Universe::narrow_oop_shift());
754 }
755
756 if (!Universe::narrow_oop_use_implicit_null_checks()) {
757 st->print(", no protected page in front of the heap");
758 }
759 st->cr();
760 }
761
ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {

  // Reserve the Java heap at the requested size/alignment; if reservation
  // fails the VM exits below.  Alignment may not exceed the conservative
  // maximum computed from the command line.
  assert(alignment <= Arguments::conservative_max_heap_alignment(),
  // Else heap start and base MUST differ, so that NULL can be encoded nonambigous.
  Universe::set_narrow_oop_base((address)total_rs.compressed_oop_base());
  }

  return total_rs;
  }

  // Reservation failed: report and exit during initialization.
  vm_exit_during_initialization(
    err_msg("Could not reserve enough space for " SIZE_FORMAT "KB object heap",
            total_reserved/K));

  // satisfy compiler
  ShouldNotReachHere();
  return ReservedHeapSpace(0, 0, false);
}
803
804
805 // It's the caller's responsibility to ensure glitch-freedom
806 // (if required).
807 void Universe::update_heap_info_at_gc() {
808 _heap_capacity_at_last_gc = GC::gc()->heap()->capacity();
809 _heap_used_at_last_gc = GC::gc()->heap()->used();
810 }
811
812
813 const char* Universe::narrow_oop_mode_to_string(Universe::NARROW_OOP_MODE mode) {
814 switch (mode) {
815 case UnscaledNarrowOop:
816 return "32-bit";
817 case ZeroBasedNarrowOop:
818 return "Zero based";
819 case DisjointBaseNarrowOop:
820 return "Non-zero disjoint base";
821 case HeapBasedNarrowOop:
822 return "Non-zero based";
823 }
824
825 ShouldNotReachHere();
826 return "";
827 }
828
829
995 Universe::_preallocated_out_of_memory_error_array = oopFactory::new_objArray(k_h(), len, CHECK_false);
996 for (int i=0; i<len; i++) {
997 oop err = k_h->allocate_instance(CHECK_false);
998 Handle err_h = Handle(THREAD, err);
999 java_lang_Throwable::allocate_backtrace(err_h, CHECK_false);
1000 Universe::preallocated_out_of_memory_errors()->obj_at_put(i, err_h());
1001 }
1002 Universe::_preallocated_out_of_memory_error_avail_count = (jint)len;
1003 }
1004
1005 Universe::initialize_known_methods(CHECK_false);
1006
1007 // This needs to be done before the first scavenge/gc, since
1008 // it's an input to soft ref clearing policy.
1009 {
1010 MutexLocker x(Heap_lock);
1011 Universe::update_heap_info_at_gc();
1012 }
1013
1014 // ("weak") refs processing infrastructure initialization
1015 GC::gc()->heap()->post_initialize();
1016
1017 // Initialize performance counters for metaspaces
1018 MetaspaceCounters::initialize_performance_counters();
1019 CompressedClassSpaceCounters::initialize_performance_counters();
1020
1021 MemoryService::add_metaspace_memory_pools();
1022
1023 MemoryService::set_universe_heap(GC::gc()->heap());
1024 #if INCLUDE_CDS
1025 SharedClassUtil::initialize(CHECK_false);
1026 #endif
1027 return true;
1028 }
1029
1030
// Cache the vtable size of java.lang.Object as computed by the class loader.
void Universe::compute_base_vtable_size() {
  _base_vtable_size = ClassLoader::compute_Object_vtable();
}
1034
// Print a heap summary to st while holding the Heap_lock.
void Universe::print_on(outputStream* st) {
  GCMutexLocker hl(Heap_lock); // Heap_lock might be locked by caller thread.
  st->print_cr("Heap");
  GC::gc()->heap()->print_on(st);
}
1040
1041 void Universe::print_heap_at_SIGBREAK() {
1042 if (PrintHeapAtSIGBREAK) {
1043 print_on(tty);
1044 tty->cr();
1045 tty->flush();
1046 }
1047 }
1048
1049 void Universe::print_heap_before_gc() {
1050 Log(gc, heap) log;
1051 if (log.is_debug()) {
1052 log.debug("Heap before GC invocations=%u (full %u):", GC::gc()->heap()->total_collections(), GC::gc()->heap()->total_full_collections());
1053 ResourceMark rm;
1054 GC::gc()->heap()->print_on(log.debug_stream());
1055 }
1056 }
1057
1058 void Universe::print_heap_after_gc() {
1059 Log(gc, heap) log;
1060 if (log.is_debug()) {
1061 log.debug("Heap after GC invocations=%u (full %u):", GC::gc()->heap()->total_collections(), GC::gc()->heap()->total_full_collections());
1062 ResourceMark rm;
1063 GC::gc()->heap()->print_on(log.debug_stream());
1064 }
1065 }
1066
1067 void Universe::initialize_verify_flags() {
1068 verify_flags = 0;
1069 const char delimiter[] = " ,";
1070
1071 size_t length = strlen(VerifySubSet);
1072 char* subset_list = NEW_C_HEAP_ARRAY(char, length + 1, mtInternal);
1073 strncpy(subset_list, VerifySubSet, length + 1);
1074
1075 char* token = strtok(subset_list, delimiter);
1076 while (token != NULL) {
1077 if (strcmp(token, "threads") == 0) {
1078 verify_flags |= Verify_Threads;
1079 } else if (strcmp(token, "heap") == 0) {
1080 verify_flags |= Verify_Heap;
1081 } else if (strcmp(token, "symbol_table") == 0) {
1082 verify_flags |= Verify_SymbolTable;
1083 } else if (strcmp(token, "string_table") == 0) {
1119 _verify_in_progress = true;
1120
1121 COMPILER2_PRESENT(
1122 assert(!DerivedPointerTable::is_active(),
1123 "DPT should not be active during verification "
1124 "(of thread stacks below)");
1125 )
1126
1127 ResourceMark rm;
1128 HandleMark hm; // Handles created during verification can be zapped
1129 _verify_count++;
1130
1131 FormatBuffer<> title("Verifying %s", prefix);
1132 GCTraceTime(Info, gc, verify) tm(title.buffer());
1133 if (should_verify_subset(Verify_Threads)) {
1134 log_debug(gc, verify)("Threads");
1135 Threads::verify();
1136 }
1137 if (should_verify_subset(Verify_Heap)) {
1138 log_debug(gc, verify)("Heap");
1139 GC::gc()->heap()->verify(option);
1140 }
1141 if (should_verify_subset(Verify_SymbolTable)) {
1142 log_debug(gc, verify)("SymbolTable");
1143 SymbolTable::verify();
1144 }
1145 if (should_verify_subset(Verify_StringTable)) {
1146 log_debug(gc, verify)("StringTable");
1147 StringTable::verify();
1148 }
1149 if (should_verify_subset(Verify_CodeCache)) {
1150 {
1151 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1152 log_debug(gc, verify)("CodeCache");
1153 CodeCache::verify();
1154 }
1155 }
1156 if (should_verify_subset(Verify_SystemDictionary)) {
1157 log_debug(gc, verify)("SystemDictionary");
1158 SystemDictionary::verify();
1159 }
1203 while ((mask & diff) != 0)
1204 mask <<= 1;
1205 uintptr_t bits = (min & mask);
1206 assert(bits == (max & mask), "correct mask");
1207 // check an intermediate value between min and max, just to make sure:
1208 assert(bits == ((min + (max-min)/2) & mask), "correct mask");
1209
1210 // require address alignment, too:
1211 mask |= (alignSize - 1);
1212
1213 if (!(_verify_oop_mask == 0 && _verify_oop_bits == (uintptr_t)-1)) {
1214 assert(_verify_oop_mask == mask && _verify_oop_bits == bits, "mask stability");
1215 }
1216 _verify_oop_mask = mask;
1217 _verify_oop_bits = bits;
1218 }
1219
1220 // Oop verification (see MacroAssembler::verify_oop)
1221
1222 uintptr_t Universe::verify_oop_mask() {
1223 MemRegion m = GC::gc()->heap()->reserved_region();
1224 calculate_verify_data(m.start(), m.end());
1225 return _verify_oop_mask;
1226 }
1227
1228 uintptr_t Universe::verify_oop_bits() {
1229 MemRegion m = GC::gc()->heap()->reserved_region();
1230 calculate_verify_data(m.start(), m.end());
1231 return _verify_oop_bits;
1232 }
1233
// Mask selecting the lock bits of the mark word, used by oop verification.
uintptr_t Universe::verify_mark_mask() {
  return markOopDesc::lock_mask_in_place;
}
1237
// Expected bit pattern (the prototype mark word) within the lock-bit mask.
uintptr_t Universe::verify_mark_bits() {
  intptr_t mask = verify_mark_mask();
  intptr_t bits = (intptr_t)markOopDesc::prototype();
  assert((bits & ~mask) == 0, "no stray header bits");
  return bits;
}
1244 #endif // PRODUCT
1245
1246
1247 void Universe::compute_verify_oop_data() {
1248 verify_oop_mask();
1249 verify_oop_bits();
|