// Shared empty metadata arrays; NULL until initialized during startup.
134 Array<u2>* Universe::_the_empty_short_array = NULL;
135 Array<Klass*>* Universe::_the_empty_klass_array = NULL;
136 Array<Method*>* Universe::_the_empty_method_array = NULL;
137
138 // These variables are guarded by FullGCALot_lock.
139 debug_only(objArrayOop Universe::_fullgc_alot_dummy_array = NULL;)
140 debug_only(int Universe::_fullgc_alot_dummy_next = 0;)
141
142 // Heap
// NOTE(review): presumably counts heap verification passes - confirm
// against Universe::verify().
143 int Universe::_verify_count = 0;
144
145 int Universe::_base_vtable_size = 0;
146 bool Universe::_bootstrapping = false;
147 bool Universe::_fully_initialized = false;
148
// No explicit initializer needed: statics are zero-initialized, so this
// starts at 0 just like _heap_used_at_last_gc below.
149 size_t Universe::_heap_capacity_at_last_gc;
150 size_t Universe::_heap_used_at_last_gc = 0;
151
152 CollectedHeap* Universe::_collectedHeap = NULL;
153
// Initializer order assumed to be { base, shift, use_implicit_null_checks }
// per the set_narrow_oop_* accessors used elsewhere - confirm in universe.hpp.
154 NarrowOopStruct Universe::_narrow_oop = { NULL, 0, true };
155
156
// Applies the closure f to the array klass of each primitive (basic)
// type. "single" presumably refers to single-precision float - confirm
// against the singleArrayKlassObj() declaration.
157 void Universe::basic_type_classes_do(void f(Klass*)) {
158 f(boolArrayKlassObj());
159 f(byteArrayKlassObj());
160 f(charArrayKlassObj());
161 f(intArrayKlassObj());
162 f(shortArrayKlassObj());
163 f(longArrayKlassObj());
164 f(singleArrayKlassObj());
165 f(doubleArrayKlassObj());
166 }
167
168 void Universe::oops_do(OopClosure* f, bool do_all) {
169
170 f->do_oop((oop*) &_int_mirror);
171 f->do_oop((oop*) &_float_mirror);
172 f->do_oop((oop*) &_double_mirror);
173 f->do_oop((oop*) &_byte_mirror);
174 f->do_oop((oop*) &_bool_mirror);
790 if (status != JNI_OK) {
791 return status;
792 }
793
794 #ifdef _LP64
795 if (UseCompressedOops) {
796 // Subtract a page because something can get allocated at heap base.
797 // This also makes implicit null checking work, because the
798 // memory+1 page below heap_base needs to cause a signal.
799 // See needs_explicit_null_check.
800 // Only set the heap base for compressed oops because it indicates
801 // compressed oops for pstack code.
802 bool verbose = PrintCompressedOopsMode || (PrintMiscellaneous && Verbose);
803 if (verbose) {
804 tty->cr();
805 tty->print("heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
806 Universe::heap()->base(), Universe::heap()->reserved_region().byte_size()/M);
807 }
808 if ((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax) {
809 // Can't reserve heap below 32Gb.
810 Universe::set_narrow_oop_base(Universe::heap()->base() - os::vm_page_size());
811 Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
812 if (verbose) {
813 tty->print(", Compressed Oops with base: "PTR_FORMAT, Universe::narrow_oop_base());
814 }
815 } else {
816 Universe::set_narrow_oop_base(0);
817 if (verbose) {
818 tty->print(", zero based Compressed Oops");
819 }
820 #ifdef _WIN64
821 if (!Universe::narrow_oop_use_implicit_null_checks()) {
822 // Don't need guard page for implicit checks in indexed addressing
823 // mode with zero based Compressed Oops.
824 Universe::set_narrow_oop_use_implicit_null_checks(true);
825 }
826 #endif // _WIN64
827 if((uint64_t)Universe::heap()->reserved_region().end() > NarrowOopHeapMax) {
828 // Can't reserve heap below 4Gb.
829 Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
830 } else {
831 Universe::set_narrow_oop_shift(0);
832 if (verbose) {
833 tty->print(", 32-bits Oops");
834 }
835 }
836 }
837 if (verbose) {
838 tty->cr();
839 tty->cr();
840 }
841 }
842 assert(Universe::narrow_oop_base() == (Universe::heap()->base() - os::vm_page_size()) ||
843 Universe::narrow_oop_base() == NULL, "invalid value");
844 assert(Universe::narrow_oop_shift() == LogMinObjAlignmentInBytes ||
845 Universe::narrow_oop_shift() == 0, "invalid value");
846 #endif
847
848 // We will never reach the CATCH below since Exceptions::_throw will cause
849 // the VM to exit if an exception is thrown during initialization
850
851 if (UseTLAB) {
852 assert(Universe::heap()->supports_tlab_allocation(),
853 "Should support thread-local allocation buffers");
854 ThreadLocalAllocBuffer::startup_initialization();
855 }
856 return JNI_OK;
857 }
858
859
860 // Reserve the Java heap, which is now the same for all GCs.
861 ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
862 // Add in the class metaspace area so the classes in the headers can
863 // be compressed the same as instances.
864 size_t total_reserved = align_size_up(heap_size + ClassMetaspaceSize, alignment);
865 char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
866
867 ReservedHeapSpace total_rs(total_reserved, alignment, UseLargePages, addr);
868
869 if (UseCompressedOops) {
870 if (addr != NULL && !total_rs.is_reserved()) {
871 // Failed to reserve at specified address - the requested memory
872 // region is taken already, for example, by 'java' launcher.
873 // Try again to reserver heap higher.
874 addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
875
876 ReservedHeapSpace total_rs0(total_reserved, alignment,
877 UseLargePages, addr);
878
879 if (addr != NULL && !total_rs0.is_reserved()) {
880 // Failed to reserve at specified address again - give up.
881 addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
882 assert(addr == NULL, "");
883
884 ReservedHeapSpace total_rs1(total_reserved, alignment,
885 UseLargePages, addr);
886 total_rs = total_rs1;
887 } else {
888 total_rs = total_rs0;
889 }
890 }
891 }
892
893 if (!total_rs.is_reserved()) {
894 vm_exit_during_initialization(err_msg("Could not reserve enough space for object heap %d bytes", total_reserved));
895 return total_rs;
896 }
897
898 // Split the reserved space into main Java heap and a space for classes
899 // so that they can be compressed using the same algorithm as compressed oops
900 ReservedSpace heap_rs = total_rs.first_part(heap_size);
901 ReservedSpace class_rs = total_rs.last_part(heap_size, alignment);
902 Metaspace::initialize_class_space(class_rs);
903 return heap_rs;
904 }
905
906
907 // It's the caller's responsibility to ensure glitch-freedom
908 // (if required).
// Snapshots the heap's current capacity and used byte counts into the
// _*_at_last_gc statics, so they can be read later without re-querying
// the heap. Presumably invoked at GC time, per the name - confirm at
// call sites.
909 void Universe::update_heap_info_at_gc() {
910 _heap_capacity_at_last_gc = heap()->capacity();
911 _heap_used_at_last_gc = heap()->used();
912 }
913
914
915
916 void universe2_init() {
917 EXCEPTION_MARK;
918 Universe::genesis(CATCH);
919 // Although we'd like to verify here that the state of the heap
920 // is good, we can't because the main thread has not yet added
921 // itself to the threads list (so, using current interfaces
922 // we can't "fill" its TLAB), unless TLABs are disabled.
|
// Shared empty metadata arrays; NULL until initialized during startup.
134 Array<u2>* Universe::_the_empty_short_array = NULL;
135 Array<Klass*>* Universe::_the_empty_klass_array = NULL;
136 Array<Method*>* Universe::_the_empty_method_array = NULL;
137
138 // These variables are guarded by FullGCALot_lock.
139 debug_only(objArrayOop Universe::_fullgc_alot_dummy_array = NULL;)
140 debug_only(int Universe::_fullgc_alot_dummy_next = 0;)
141
142 // Heap
// NOTE(review): presumably counts heap verification passes - confirm
// against Universe::verify().
143 int Universe::_verify_count = 0;
144
145 int Universe::_base_vtable_size = 0;
146 bool Universe::_bootstrapping = false;
147 bool Universe::_fully_initialized = false;
148
// No explicit initializer needed: statics are zero-initialized, so this
// starts at 0 just like _heap_used_at_last_gc below.
149 size_t Universe::_heap_capacity_at_last_gc;
150 size_t Universe::_heap_used_at_last_gc = 0;
151
152 CollectedHeap* Universe::_collectedHeap = NULL;
153
// Compressed-oop and compressed-klass encodings. Initializer order is
// assumed to be { base, shift, use_implicit_null_checks } per the
// set_narrow_oop_*/set_narrow_klass_* accessors - confirm in universe.hpp.
154 NarrowPtrStruct Universe::_narrow_oop = { NULL, 0, true };
155 NarrowPtrStruct Universe::_narrow_klass = { NULL, 0, true };
// Zero-initialized static; set via Universe::set_narrow_ptrs_base().
156 address Universe::_narrow_ptrs_base;
157
158
// Applies the closure f to the array klass of each primitive (basic)
// type. "single" presumably refers to single-precision float - confirm
// against the singleArrayKlassObj() declaration.
159 void Universe::basic_type_classes_do(void f(Klass*)) {
160 f(boolArrayKlassObj());
161 f(byteArrayKlassObj());
162 f(charArrayKlassObj());
163 f(intArrayKlassObj());
164 f(shortArrayKlassObj());
165 f(longArrayKlassObj());
166 f(singleArrayKlassObj());
167 f(doubleArrayKlassObj());
168 }
169
170 void Universe::oops_do(OopClosure* f, bool do_all) {
171
172 f->do_oop((oop*) &_int_mirror);
173 f->do_oop((oop*) &_float_mirror);
174 f->do_oop((oop*) &_double_mirror);
175 f->do_oop((oop*) &_byte_mirror);
176 f->do_oop((oop*) &_bool_mirror);
792 if (status != JNI_OK) {
793 return status;
794 }
795
796 #ifdef _LP64
797 if (UseCompressedOops) {
798 // Subtract a page because something can get allocated at heap base.
799 // This also makes implicit null checking work, because the
800 // memory+1 page below heap_base needs to cause a signal.
801 // See needs_explicit_null_check.
802 // Only set the heap base for compressed oops because it indicates
803 // compressed oops for pstack code.
804 bool verbose = PrintCompressedOopsMode || (PrintMiscellaneous && Verbose);
805 if (verbose) {
806 tty->cr();
807 tty->print("heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
808 Universe::heap()->base(), Universe::heap()->reserved_region().byte_size()/M);
809 }
810 if ((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax) {
811 // Can't reserve heap below 32Gb.
812 // keep the Universe::narrow_oop_base() set in Universe::reserve_heap()
813 Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
814 if (verbose) {
815 tty->print(", Compressed Oops with base: "PTR_FORMAT, Universe::narrow_oop_base());
816 }
817 } else {
818 Universe::set_narrow_oop_base(0);
819 if (verbose) {
820 tty->print(", zero based Compressed Oops");
821 }
822 #ifdef _WIN64
823 if (!Universe::narrow_oop_use_implicit_null_checks()) {
824 // Don't need guard page for implicit checks in indexed addressing
825 // mode with zero based Compressed Oops.
826 Universe::set_narrow_oop_use_implicit_null_checks(true);
827 }
828 #endif // _WIN64
829 if((uint64_t)Universe::heap()->reserved_region().end() > NarrowOopHeapMax) {
830 // Can't reserve heap below 4Gb.
831 Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
832 } else {
833 Universe::set_narrow_oop_shift(0);
834 if (verbose) {
835 tty->print(", 32-bits Oops");
836 }
837 }
838 }
839 if (verbose) {
840 tty->cr();
841 tty->cr();
842 }
843 if (UseCompressedKlassPointers) {
844 Universe::set_narrow_klass_base(Universe::narrow_oop_base());
845 Universe::set_narrow_klass_shift(MIN2(Universe::narrow_oop_shift(), LogKlassAlignmentInBytes));
846 }
847 Universe::set_narrow_ptrs_base(Universe::narrow_oop_base());
848 }
849 // Universe::narrow_oop_base() is one page below the metaspace
850 // base. The actual metaspace base depends on alignment constraints
851 // so we don't know its exact location here.
852 assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() - os::vm_page_size() - ClassMetaspaceSize) ||
853 Universe::narrow_oop_base() == NULL, "invalid value");
854 assert(Universe::narrow_oop_shift() == LogMinObjAlignmentInBytes ||
855 Universe::narrow_oop_shift() == 0, "invalid value");
856 #endif
857
858 // We will never reach the CATCH below since Exceptions::_throw will cause
859 // the VM to exit if an exception is thrown during initialization
860
861 if (UseTLAB) {
862 assert(Universe::heap()->supports_tlab_allocation(),
863 "Should support thread-local allocation buffers");
864 ThreadLocalAllocBuffer::startup_initialization();
865 }
866 return JNI_OK;
867 }
868
869
870 // Reserve the Java heap, which is now the same for all GCs.
871 ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
872 // Add in the class metaspace area so the classes in the headers can
873 // be compressed the same as instances.
874 // Need to round class space size up because it's below the heap and
875 // the actual alignment depends on its size.
876 size_t metaspace_size = align_size_up(ClassMetaspaceSize, alignment);
877 size_t total_reserved = align_size_up(heap_size + metaspace_size, alignment);
878 char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
879
880 ReservedHeapSpace total_rs(total_reserved, alignment, UseLargePages, addr);
881
882 if (UseCompressedOops) {
883 if (addr != NULL && !total_rs.is_reserved()) {
884 // Failed to reserve at specified address - the requested memory
885 // region is taken already, for example, by 'java' launcher.
886 // Try again to reserver heap higher.
887 addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
888
889 ReservedHeapSpace total_rs0(total_reserved, alignment,
890 UseLargePages, addr);
891
892 if (addr != NULL && !total_rs0.is_reserved()) {
893 // Failed to reserve at specified address again - give up.
894 addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
895 assert(addr == NULL, "");
896
897 ReservedHeapSpace total_rs1(total_reserved, alignment,
898 UseLargePages, addr);
899 total_rs = total_rs1;
900 } else {
901 total_rs = total_rs0;
902 }
903 }
904 }
905
906 if (!total_rs.is_reserved()) {
907 vm_exit_during_initialization(err_msg("Could not reserve enough space for object heap %d bytes", total_reserved));
908 return total_rs;
909 }
910
911 // Split the reserved space into main Java heap and a space for
912 // classes so that they can be compressed using the same algorithm
913 // as compressed oops. If compress oops and compress klass ptrs are
914 // used we need the meta space first: if the alignment used for
915 // compressed oops is greater than the one used for compressed klass
916 // ptrs, a metadata space on top of the heap could become
917 // unreachable.
918 ReservedSpace class_rs = total_rs.first_part(metaspace_size);
919 ReservedSpace heap_rs = total_rs.last_part(metaspace_size, alignment);
920 Metaspace::initialize_class_space(class_rs);
921
922 if (UseCompressedOops) {
923 // Universe::initialize_heap() will reset this to NULL if unscaled
924 // or zero-based narrow oops are actually used.
925 address base = (address)(total_rs.base() - os::vm_page_size());
926 Universe::set_narrow_oop_base(base);
927 }
928 return heap_rs;
929 }
930
931
932 // It's the caller's responsibility to ensure glitch-freedom
933 // (if required).
// Snapshots the heap's current capacity and used byte counts into the
// _*_at_last_gc statics, so they can be read later without re-querying
// the heap. Presumably invoked at GC time, per the name - confirm at
// call sites.
934 void Universe::update_heap_info_at_gc() {
935 _heap_capacity_at_last_gc = heap()->capacity();
936 _heap_used_at_last_gc = heap()->used();
937 }
938
939
940
941 void universe2_init() {
942 EXCEPTION_MARK;
943 Universe::genesis(CATCH);
944 // Although we'd like to verify here that the state of the heap
945 // is good, we can't because the main thread has not yet added
946 // itself to the threads list (so, using current interfaces
947 // we can't "fill" its TLAB), unless TLABs are disabled.
|