src/share/vm/memory/universe.cpp
Index Unified diffs Context diffs Sdiffs Wdiffs Patch New Old Previous File Next File bug_8003424.4 Sdiff src/share/vm/memory

src/share/vm/memory/universe.cpp

Print this page




 129 // These variables are guarded by FullGCALot_lock.
 130 debug_only(objArrayOop Universe::_fullgc_alot_dummy_array = NULL;)
 131 debug_only(int Universe::_fullgc_alot_dummy_next      = 0;)
 132 
 133 // Heap
 134 int             Universe::_verify_count = 0;
 135 
 136 int             Universe::_base_vtable_size = 0;
 137 bool            Universe::_bootstrapping = false;
 138 bool            Universe::_fully_initialized = false;
 139 
 140 size_t          Universe::_heap_capacity_at_last_gc;
 141 size_t          Universe::_heap_used_at_last_gc = 0;
 142 
 143 CollectedHeap*  Universe::_collectedHeap = NULL;
 144 
 145 NarrowPtrStruct Universe::_narrow_oop = { NULL, 0, true };
 146 NarrowPtrStruct Universe::_narrow_klass = { NULL, 0, true };
 147 address Universe::_narrow_ptrs_base;
 148 
 149 size_t          Universe::_class_metaspace_size;
 150 
 // Apply f to the array klass of each Java primitive type.
 // (singleArrayKlassObj is presumably the float[] (single-precision) klass —
 // confirm against universe.hpp.)
 151 void Universe::basic_type_classes_do(void f(Klass*)) {
 152   f(boolArrayKlassObj());
 153   f(byteArrayKlassObj());
 154   f(charArrayKlassObj());
 155   f(intArrayKlassObj());
 156   f(shortArrayKlassObj());
 157   f(longArrayKlassObj());
 158   f(singleArrayKlassObj());
 159   f(doubleArrayKlassObj());
 160 }
 161 
 162 void Universe::oops_do(OopClosure* f, bool do_all) {
 163 
 164   f->do_oop((oop*) &_int_mirror);
 165   f->do_oop((oop*) &_float_mirror);
 166   f->do_oop((oop*) &_double_mirror);
 167   f->do_oop((oop*) &_byte_mirror);
 168   f->do_oop((oop*) &_bool_mirror);
 169   f->do_oop((oop*) &_char_mirror);
 170   f->do_oop((oop*) &_long_mirror);


 626 
 627   return (void*)non_oop_bits;
 628 }
 629 
 630 jint universe_init() {
 631   assert(!Universe::_fully_initialized, "called after initialize_vtables");
 632   guarantee(1 << LogHeapWordSize == sizeof(HeapWord),
 633          "LogHeapWordSize is incorrect.");
 634   guarantee(sizeof(oop) >= sizeof(HeapWord), "HeapWord larger than oop?");
 635   guarantee(sizeof(oop) % sizeof(HeapWord) == 0,
 636             "oop size is not not a multiple of HeapWord size");
 637   TraceTime timer("Genesis", TraceStartupTime);
 638   GC_locker::lock();  // do not allow gc during bootstrapping
 639   JavaClasses::compute_hard_coded_offsets();
 640 
 641   jint status = Universe::initialize_heap();
 642   if (status != JNI_OK) {
 643     return status;
 644   }
 645 


 646   // Create memory for metadata.  Must be after initializing heap for
 647   // DumpSharedSpaces.
 648   ClassLoaderData::init_null_class_loader_data();
 649 
 650   // We have a heap so create the Method* caches before
 651   // Metaspace::initialize_shared_spaces() tries to populate them.
 652   Universe::_finalizer_register_cache = new LatestMethodOopCache();
 653   Universe::_loader_addClass_cache    = new LatestMethodOopCache();
 654   Universe::_pd_implies_cache         = new LatestMethodOopCache();
 655   Universe::_reflect_invoke_cache     = new ActiveMethodOopsCache();
 656 
 657   if (UseSharedSpaces) {
 658     // Read the data structures supporting the shared spaces (shared
 659     // system dictionary, symbol table, etc.).  After that, access to
 660     // the file (other than the mapped regions) is no longer needed, and
 661     // the file is closed. Closing the file does not affect the
 662     // currently mapped regions.
 663     MetaspaceShared::initialize_shared_spaces();
 664     StringTable::create_table();
 665   } else {


 679 //     NarrowOopHeapBaseMin + heap_size < 32Gb
 680 // HeapBased - Use compressed oops with heap base + encoding.
 681 
 682 // 4Gb
 683 static const uint64_t NarrowOopHeapMax = (uint64_t(max_juint) + 1);
 684 // 32Gb
 685 // OopEncodingHeapMax == NarrowOopHeapMax << LogMinObjAlignmentInBytes;
 686 
 687 char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
 688   size_t base = 0;
 689 #ifdef _LP64
 690   if (UseCompressedOops) {
 691     assert(mode == UnscaledNarrowOop  ||
 692            mode == ZeroBasedNarrowOop ||
 693            mode == HeapBasedNarrowOop, "mode is invalid");
 694     const size_t total_size = heap_size + HeapBaseMinAddress;
 695     // Return specified base for the first request.
 696     if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
 697       base = HeapBaseMinAddress;
 698 
 699     // If the total size and the metaspace size are small enough to allow
 700     // UnscaledNarrowOop then just use UnscaledNarrowOop.
 701     } else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop) &&
 702         (!UseCompressedKlassPointers ||
 703           (((OopEncodingHeapMax - heap_size) + Universe::class_metaspace_size()) <= KlassEncodingMetaspaceMax))) {
 704       // We don't need to check the metaspace size here because it is always smaller
 705       // than total_size.
 706       if ((total_size <= NarrowOopHeapMax) && (mode == UnscaledNarrowOop) &&
 707           (Universe::narrow_oop_shift() == 0)) {
 708         // Use 32-bits oops without encoding and
 709         // place heap's top on the 4Gb boundary
 710         base = (NarrowOopHeapMax - heap_size);
 711       } else {
 712         // Can't reserve with NarrowOopShift == 0
 713         Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
 714         if (mode == UnscaledNarrowOop ||
 715             mode == ZeroBasedNarrowOop && total_size <= NarrowOopHeapMax) {
 716           // Use zero based compressed oops with encoding and
 717           // place heap's top on the 32Gb boundary in case
 718           // total_size > 4Gb or failed to reserve below 4Gb.
 719           base = (OopEncodingHeapMax - heap_size);
 720         }
 721       }
 722 
 723     // See if ZeroBaseNarrowOop encoding will work for a heap based at
 724     // (KlassEncodingMetaspaceMax - class_metaspace_size()).
 725     } else if (UseCompressedKlassPointers && (mode != HeapBasedNarrowOop) &&
 726         (Universe::class_metaspace_size() + HeapBaseMinAddress <= KlassEncodingMetaspaceMax) &&
 727         (KlassEncodingMetaspaceMax + heap_size - Universe::class_metaspace_size() <= OopEncodingHeapMax)) {
 728       base = (KlassEncodingMetaspaceMax - Universe::class_metaspace_size());
 729     } else {
 730       // UnscaledNarrowOop encoding didn't work, and no base was found for ZeroBasedOops or
 731       // HeapBasedNarrowOop encoding was requested.  So, can't reserve below 32Gb.
 732       Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
 733     }
 734 
 735     // Set narrow_oop_base and narrow_oop_use_implicit_null_checks
 736     // used in ReservedHeapSpace() constructors.
 737     // The final values will be set in initialize_heap() below.
 738     if ((base != 0) && ((base + heap_size) <= OopEncodingHeapMax) &&
 739         (!UseCompressedKlassPointers || (base + Universe::class_metaspace_size()) <= KlassEncodingMetaspaceMax)) {
 740       // Use zero based compressed oops
 741       Universe::set_narrow_oop_base(NULL);
 742       // Don't need guard page for implicit checks in indexed
 743       // addressing mode with zero based Compressed Oops.
 744       Universe::set_narrow_oop_use_implicit_null_checks(true);
 745     } else {
 746       // Set to a non-NULL value so the ReservedSpace ctor computes
 747       // the correct no-access prefix.
 748       // The final value will be set in initialize_heap() below.
 749       Universe::set_narrow_oop_base((address)NarrowOopHeapMax);
 750 #ifdef _WIN64
 751       if (UseLargePages) {
 752         // Cannot allocate guard pages for implicit checks in indexed
 753         // addressing mode when large pages are specified on windows.
 754         Universe::set_narrow_oop_use_implicit_null_checks(false);
 755       }
 756 #endif //  _WIN64
 757     }
 758   }
 759 #endif


 802 
 803   jint status = Universe::heap()->initialize();
 804   if (status != JNI_OK) {
 805     return status;
 806   }
 807 
 808 #ifdef _LP64
 809   if (UseCompressedOops) {
 810     // Subtract a page because something can get allocated at heap base.
 811     // This also makes implicit null checking work, because the
 812     // memory+1 page below heap_base needs to cause a signal.
 813     // See needs_explicit_null_check.
 814     // Only set the heap base for compressed oops because it indicates
 815     // compressed oops for pstack code.
 816     bool verbose = PrintCompressedOopsMode || (PrintMiscellaneous && Verbose);
 817     if (verbose) {
 818       tty->cr();
 819       tty->print("heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
 820                  Universe::heap()->base(), Universe::heap()->reserved_region().byte_size()/M);
 821     }
 822     if (((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax) ||
 823         (UseCompressedKlassPointers &&
 824         ((uint64_t)Universe::heap()->base() + Universe::class_metaspace_size() > KlassEncodingMetaspaceMax))) {
 825       // Can't reserve heap below 32Gb.
 826       // keep the Universe::narrow_oop_base() set in Universe::reserve_heap()
 827       Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
 828       if (verbose) {
 829         tty->print(", %s: "PTR_FORMAT,
 830             narrow_oop_mode_to_string(HeapBasedNarrowOop),
 831             Universe::narrow_oop_base());
 832       }
 833     } else {
 834       Universe::set_narrow_oop_base(0);
 835       if (verbose) {
 836         tty->print(", %s", narrow_oop_mode_to_string(ZeroBasedNarrowOop));
 837       }
 838 #ifdef _WIN64
 839       if (!Universe::narrow_oop_use_implicit_null_checks()) {
 840         // Don't need guard page for implicit checks in indexed addressing
 841         // mode with zero based Compressed Oops.
 842         Universe::set_narrow_oop_use_implicit_null_checks(true);
 843       }
 844 #endif //  _WIN64
 845       if((uint64_t)Universe::heap()->reserved_region().end() > NarrowOopHeapMax) {
 846         // Can't reserve heap below 4Gb.
 847         Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
 848       } else {
 849         Universe::set_narrow_oop_shift(0);
 850         if (verbose) {
 851           tty->print(", %s", narrow_oop_mode_to_string(UnscaledNarrowOop));
 852         }
 853       }
 854     }

 855     if (verbose) {
 856       tty->cr();
 857       tty->cr();
 858     }
 859     if (UseCompressedKlassPointers) {
 860       Universe::set_narrow_klass_base(Universe::narrow_oop_base());
 861       Universe::set_narrow_klass_shift(MIN2(Universe::narrow_oop_shift(), LogKlassAlignmentInBytes));
 862     }
 863     Universe::set_narrow_ptrs_base(Universe::narrow_oop_base());
 864   }
 865   // Universe::narrow_oop_base() is one page below the metaspace
 866   // base. The actual metaspace base depends on alignment constraints
 867   // so we don't know its exact location here.
 868   assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() - os::vm_page_size() - ClassMetaspaceSize) ||
 869          Universe::narrow_oop_base() == NULL, "invalid value");
 870   assert(Universe::narrow_oop_shift() == LogMinObjAlignmentInBytes ||
 871          Universe::narrow_oop_shift() == 0, "invalid value");
 872 #endif
 873 
 874   // We will never reach the CATCH below since Exceptions::_throw will cause
 875   // the VM to exit if an exception is thrown during initialization
 876 
 877   if (UseTLAB) {
 878     assert(Universe::heap()->supports_tlab_allocation(),
 879            "Should support thread-local allocation buffers");
 880     ThreadLocalAllocBuffer::startup_initialization();
 881   }
 882   return JNI_OK;
 883 }
 884 
 885 
 886 // Reserve the Java heap, which is now the same for all GCs.
 // Reserves class-metaspace + heap as one contiguous region, then splits it:
 // the metaspace part (below the heap) is handed to Metaspace and only the
 // heap part is returned to the caller.
 887 ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
 888   // Add in the class metaspace area so the classes in the headers can
 889   // be compressed the same as instances.
 890   // Need to round class space size up because it's below the heap and
 891   // the actual alignment depends on its size.
 892   Universe::set_class_metaspace_size(align_size_up(ClassMetaspaceSize, alignment));
 893   size_t total_reserved = align_size_up(heap_size + Universe::class_metaspace_size(), alignment);
 894   assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
 895       "heap size is too big for compressed oops");
 896   char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
 897 
 898   ReservedHeapSpace total_rs(total_reserved, alignment, UseLargePages, addr);
 899 
 900   if (UseCompressedOops) {
 901     if (addr != NULL && !total_rs.is_reserved()) {
 902       // Failed to reserve at specified address - the requested memory
 903       // region is taken already, for example, by 'java' launcher.
 904       // Try again to reserve heap higher.
 905       addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
 906 
 907       ReservedHeapSpace total_rs0(total_reserved, alignment,
 908                                   UseLargePages, addr);
 909 
 910       if (addr != NULL && !total_rs0.is_reserved()) {
 911         // Failed to reserve at specified address again - give up.
 912         addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
         // HeapBasedNarrowOop mode is expected to yield no preferred base
         // (OS picks the address) — the assert below relies on that.
 913         assert(addr == NULL, "");
 914 
 915         ReservedHeapSpace total_rs1(total_reserved, alignment,
 916                                     UseLargePages, addr);
 917         total_rs = total_rs1;
 918       } else {
 919         total_rs = total_rs0;
 920       }
 921     }
 922   }
 923 
 924   if (!total_rs.is_reserved()) {
 925     vm_exit_during_initialization(err_msg("Could not reserve enough space for " SIZE_FORMAT "KB object heap", total_reserved/K));
 926     return total_rs;
 927   }
 928 
 929   // Split the reserved space into main Java heap and a space for
 930   // classes so that they can be compressed using the same algorithm
 931   // as compressed oops. If compress oops and compress klass ptrs are
 932   // used we need the meta space first: if the alignment used for
 933   // compressed oops is greater than the one used for compressed klass
 934   // ptrs, a metadata space on top of the heap could become
 935   // unreachable.
 936   ReservedSpace class_rs = total_rs.first_part(Universe::class_metaspace_size());
 937   ReservedSpace heap_rs = total_rs.last_part(Universe::class_metaspace_size(), alignment);
 938   Metaspace::initialize_class_space(class_rs);
 939 
 940   if (UseCompressedOops) {
 941     // Universe::initialize_heap() will reset this to NULL if unscaled
 942     // or zero-based narrow oops are actually used.
 943     address base = (address)(total_rs.base() - os::vm_page_size());
 944     Universe::set_narrow_oop_base(base);
 945   }
 946   return heap_rs;
 947 }
 948 
 949 
 950 // It's the caller's responsibility to ensure glitch-freedom
 951 // (if required).
 // Snapshots heap capacity/usage into statics (guarded elsewhere, per the
 // caller) so later readers need not query the heap directly.
 952 void Universe::update_heap_info_at_gc() {
 953   _heap_capacity_at_last_gc = heap()->capacity();
 954   _heap_used_at_last_gc     = heap()->used();
 955 }
 956 
 957 
 // Map a NARROW_OOP_MODE value to a human-readable name; used by the
 // verbose compressed-oops logging in initialize_heap().
 958 const char* Universe::narrow_oop_mode_to_string(Universe::NARROW_OOP_MODE mode) {
 959   switch (mode) {
 960     case UnscaledNarrowOop:
 961       return "32-bits Oops";
 962     case ZeroBasedNarrowOop:
 963       return "zero based Compressed Oops";
 964     case HeapBasedNarrowOop:
 965       return "Compressed Oops with base";
 966   }
 967 
 // Unknown enum value is a programming error.
 968   ShouldNotReachHere();
 969   return "";
 970 }




 129 // These variables are guarded by FullGCALot_lock.
 130 debug_only(objArrayOop Universe::_fullgc_alot_dummy_array = NULL;)
 131 debug_only(int Universe::_fullgc_alot_dummy_next      = 0;)
 132 
 133 // Heap
 134 int             Universe::_verify_count = 0;
 135 
 136 int             Universe::_base_vtable_size = 0;
 137 bool            Universe::_bootstrapping = false;
 138 bool            Universe::_fully_initialized = false;
 139 
 140 size_t          Universe::_heap_capacity_at_last_gc;
 141 size_t          Universe::_heap_used_at_last_gc = 0;
 142 
 143 CollectedHeap*  Universe::_collectedHeap = NULL;
 144 
 145 NarrowPtrStruct Universe::_narrow_oop = { NULL, 0, true };
 146 NarrowPtrStruct Universe::_narrow_klass = { NULL, 0, true };
 147 address Universe::_narrow_ptrs_base;
 148 


 // Apply f to the array klass of each Java primitive type.
 // (singleArrayKlassObj is presumably the float[] (single-precision) klass —
 // confirm against universe.hpp.)
 149 void Universe::basic_type_classes_do(void f(Klass*)) {
 150   f(boolArrayKlassObj());
 151   f(byteArrayKlassObj());
 152   f(charArrayKlassObj());
 153   f(intArrayKlassObj());
 154   f(shortArrayKlassObj());
 155   f(longArrayKlassObj());
 156   f(singleArrayKlassObj());
 157   f(doubleArrayKlassObj());
 158 }
 159 
 160 void Universe::oops_do(OopClosure* f, bool do_all) {
 161 
 162   f->do_oop((oop*) &_int_mirror);
 163   f->do_oop((oop*) &_float_mirror);
 164   f->do_oop((oop*) &_double_mirror);
 165   f->do_oop((oop*) &_byte_mirror);
 166   f->do_oop((oop*) &_bool_mirror);
 167   f->do_oop((oop*) &_char_mirror);
 168   f->do_oop((oop*) &_long_mirror);


 624 
 625   return (void*)non_oop_bits;
 626 }
 627 
 628 jint universe_init() {
 629   assert(!Universe::_fully_initialized, "called after initialize_vtables");
 630   guarantee(1 << LogHeapWordSize == sizeof(HeapWord),
 631          "LogHeapWordSize is incorrect.");
 632   guarantee(sizeof(oop) >= sizeof(HeapWord), "HeapWord larger than oop?");
 633   guarantee(sizeof(oop) % sizeof(HeapWord) == 0,
 634             "oop size is not not a multiple of HeapWord size");
 635   TraceTime timer("Genesis", TraceStartupTime);
 636   GC_locker::lock();  // do not allow gc during bootstrapping
 637   JavaClasses::compute_hard_coded_offsets();
 638 
 639   jint status = Universe::initialize_heap();
 640   if (status != JNI_OK) {
 641     return status;
 642   }
 643 
 644   Metaspace::global_initialize();
 645 
 646   // Create memory for metadata.  Must be after initializing heap for
 647   // DumpSharedSpaces.
 648   ClassLoaderData::init_null_class_loader_data();
 649 
 650   // We have a heap so create the Method* caches before
 651   // Metaspace::initialize_shared_spaces() tries to populate them.
 652   Universe::_finalizer_register_cache = new LatestMethodOopCache();
 653   Universe::_loader_addClass_cache    = new LatestMethodOopCache();
 654   Universe::_pd_implies_cache         = new LatestMethodOopCache();
 655   Universe::_reflect_invoke_cache     = new ActiveMethodOopsCache();
 656 
 657   if (UseSharedSpaces) {
 658     // Read the data structures supporting the shared spaces (shared
 659     // system dictionary, symbol table, etc.).  After that, access to
 660     // the file (other than the mapped regions) is no longer needed, and
 661     // the file is closed. Closing the file does not affect the
 662     // currently mapped regions.
 663     MetaspaceShared::initialize_shared_spaces();
 664     StringTable::create_table();
 665   } else {


 679 //     NarrowOopHeapBaseMin + heap_size < 32Gb
 680 // HeapBased - Use compressed oops with heap base + encoding.
 681 
 682 // 4Gb
 683 static const uint64_t NarrowOopHeapMax = (uint64_t(max_juint) + 1);
 684 // 32Gb
 685 // OopEncodingHeapMax == NarrowOopHeapMax << LogMinObjAlignmentInBytes;
 686 
 687 char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
 688   size_t base = 0;
 689 #ifdef _LP64
 690   if (UseCompressedOops) {
 691     assert(mode == UnscaledNarrowOop  ||
 692            mode == ZeroBasedNarrowOop ||
 693            mode == HeapBasedNarrowOop, "mode is invalid");
 694     const size_t total_size = heap_size + HeapBaseMinAddress;
 695     // Return specified base for the first request.
 696     if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
 697       base = HeapBaseMinAddress;
 698 
 699     // If the total size is small enough to allow UnscaledNarrowOop then
 700     // just use UnscaledNarrowOop.
 701     } else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop)) {




 702       if ((total_size <= NarrowOopHeapMax) && (mode == UnscaledNarrowOop) &&
 703           (Universe::narrow_oop_shift() == 0)) {
 704         // Use 32-bits oops without encoding and
 705         // place heap's top on the 4Gb boundary
 706         base = (NarrowOopHeapMax - heap_size);
 707       } else {
 708         // Can't reserve with NarrowOopShift == 0
 709         Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
 710         if (mode == UnscaledNarrowOop ||
 711             mode == ZeroBasedNarrowOop && total_size <= NarrowOopHeapMax) {
 712           // Use zero based compressed oops with encoding and
 713           // place heap's top on the 32Gb boundary in case
 714           // total_size > 4Gb or failed to reserve below 4Gb.
 715           base = (OopEncodingHeapMax - heap_size);
 716         }
 717       }







 718     } else {
 719       // UnscaledNarrowOop encoding didn't work, and no base was found for ZeroBasedOops or
 720       // HeapBasedNarrowOop encoding was requested.  So, can't reserve below 32Gb.
 721       Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
 722     }
 723 
 724     // Set narrow_oop_base and narrow_oop_use_implicit_null_checks
 725     // used in ReservedHeapSpace() constructors.
 726     // The final values will be set in initialize_heap() below.
 727     if ((base != 0) && ((base + heap_size) <= OopEncodingHeapMax)) {

 728       // Use zero based compressed oops
 729       Universe::set_narrow_oop_base(NULL);
 730       // Don't need guard page for implicit checks in indexed
 731       // addressing mode with zero based Compressed Oops.
 732       Universe::set_narrow_oop_use_implicit_null_checks(true);
 733     } else {
 734       // Set to a non-NULL value so the ReservedSpace ctor computes
 735       // the correct no-access prefix.
 736       // The final value will be set in initialize_heap() below.
 737       Universe::set_narrow_oop_base((address)NarrowOopHeapMax);
 738 #ifdef _WIN64
 739       if (UseLargePages) {
 740         // Cannot allocate guard pages for implicit checks in indexed
 741         // addressing mode when large pages are specified on windows.
 742         Universe::set_narrow_oop_use_implicit_null_checks(false);
 743       }
 744 #endif //  _WIN64
 745     }
 746   }
 747 #endif


 790 
 791   jint status = Universe::heap()->initialize();
 792   if (status != JNI_OK) {
 793     return status;
 794   }
 795 
 796 #ifdef _LP64
 797   if (UseCompressedOops) {
 798     // Subtract a page because something can get allocated at heap base.
 799     // This also makes implicit null checking work, because the
 800     // memory+1 page below heap_base needs to cause a signal.
 801     // See needs_explicit_null_check.
 802     // Only set the heap base for compressed oops because it indicates
 803     // compressed oops for pstack code.
 804     bool verbose = PrintCompressedOopsMode || (PrintMiscellaneous && Verbose);
 805     if (verbose) {
 806       tty->cr();
 807       tty->print("heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
 808                  Universe::heap()->base(), Universe::heap()->reserved_region().byte_size()/M);
 809     }
 810     if (((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax)) {


 811       // Can't reserve heap below 32Gb.
 812       // keep the Universe::narrow_oop_base() set in Universe::reserve_heap()
 813       Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
 814       if (verbose) {
 815         tty->print(", %s: "PTR_FORMAT,
 816             narrow_oop_mode_to_string(HeapBasedNarrowOop),
 817             Universe::narrow_oop_base());
 818       }
 819     } else {
 820       Universe::set_narrow_oop_base(0);
 821       if (verbose) {
 822         tty->print(", %s", narrow_oop_mode_to_string(ZeroBasedNarrowOop));
 823       }
 824 #ifdef _WIN64
 825       if (!Universe::narrow_oop_use_implicit_null_checks()) {
 826         // Don't need guard page for implicit checks in indexed addressing
 827         // mode with zero based Compressed Oops.
 828         Universe::set_narrow_oop_use_implicit_null_checks(true);
 829       }
 830 #endif //  _WIN64
 831       if((uint64_t)Universe::heap()->reserved_region().end() > NarrowOopHeapMax) {
 832         // Can't reserve heap below 4Gb.
 833         Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
 834       } else {
 835         Universe::set_narrow_oop_shift(0);
 836         if (verbose) {
 837           tty->print(", %s", narrow_oop_mode_to_string(UnscaledNarrowOop));
 838         }
 839       }
 840     }
 841 
 842     if (verbose) {
 843       tty->cr();
 844       tty->cr();
 845     }




 846     Universe::set_narrow_ptrs_base(Universe::narrow_oop_base());
 847   }
 848   // Universe::narrow_oop_base() is one page below the heap.
 849   assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() -
 850          os::vm_page_size()) ||

 851          Universe::narrow_oop_base() == NULL, "invalid value");
 852   assert(Universe::narrow_oop_shift() == LogMinObjAlignmentInBytes ||
 853          Universe::narrow_oop_shift() == 0, "invalid value");
 854 #endif
 855 
 856   // We will never reach the CATCH below since Exceptions::_throw will cause
 857   // the VM to exit if an exception is thrown during initialization
 858 
 859   if (UseTLAB) {
 860     assert(Universe::heap()->supports_tlab_allocation(),
 861            "Should support thread-local allocation buffers");
 862     ThreadLocalAllocBuffer::startup_initialization();
 863   }
 864   return JNI_OK;
 865 }
 866 
 867 
 868 // Reserve the Java heap, which is now the same for all GCs.
 // Reserves the (aligned-up) heap only and returns the whole reservation;
 // no class-metaspace region is carved out of it here.
 869 ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
 870   size_t total_reserved = align_size_up(heap_size, alignment);
 871   assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
 872       "heap size is too big for compressed oops");
 873   char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
 874 
 875   ReservedHeapSpace total_rs(total_reserved, alignment, UseLargePages, addr);
 876 
 877   if (UseCompressedOops) {
 878     if (addr != NULL && !total_rs.is_reserved()) {
 879       // Failed to reserve at specified address - the requested memory
 880       // region is taken already, for example, by 'java' launcher.
 881       // Try again to reserve heap higher.
 882       addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
 883 
 884       ReservedHeapSpace total_rs0(total_reserved, alignment,
 885                                   UseLargePages, addr);
 886 
 887       if (addr != NULL && !total_rs0.is_reserved()) {
 888         // Failed to reserve at specified address again - give up.
 889         addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
         // HeapBasedNarrowOop mode is expected to yield no preferred base
         // (OS picks the address) — the assert below relies on that.
 890         assert(addr == NULL, "");
 891 
 892         ReservedHeapSpace total_rs1(total_reserved, alignment,
 893                                     UseLargePages, addr);
 894         total_rs = total_rs1;
 895       } else {
 896         total_rs = total_rs0;
 897       }
 898     }
 899   }
 900 
 901   if (!total_rs.is_reserved()) {
 902     vm_exit_during_initialization(err_msg("Could not reserve enough space for " SIZE_FORMAT "KB object heap", total_reserved/K));
 903     return total_rs;
 904   }
 905 
 906   if (UseCompressedOops) {
 907     // Universe::initialize_heap() will reset this to NULL if unscaled
 908     // or zero-based narrow oops are actually used.
 909     address base = (address)(total_rs.base() - os::vm_page_size());
 910     Universe::set_narrow_oop_base(base);
 911   }
 912   return total_rs;
 913 }
 914 
 915 
 916 // It's the caller's responsibility to ensure glitch-freedom
 917 // (if required).
 // Snapshots heap capacity/usage into statics (guarded elsewhere, per the
 // caller) so later readers need not query the heap directly.
 918 void Universe::update_heap_info_at_gc() {
 919   _heap_capacity_at_last_gc = heap()->capacity();
 920   _heap_used_at_last_gc     = heap()->used();
 921 }
 922 
 923 
 // Map a NARROW_OOP_MODE value to a human-readable name; used by the
 // verbose compressed-oops logging in initialize_heap().
 924 const char* Universe::narrow_oop_mode_to_string(Universe::NARROW_OOP_MODE mode) {
 925   switch (mode) {
 926     case UnscaledNarrowOop:
 927       return "32-bits Oops";
 928     case ZeroBasedNarrowOop:
 929       return "zero based Compressed Oops";
 930     case HeapBasedNarrowOop:
 931       return "Compressed Oops with base";
 932   }
 933 
 // Unknown enum value is a programming error.
 934   ShouldNotReachHere();
 935   return "";
 936 }


src/share/vm/memory/universe.cpp
Index Unified diffs Context diffs Sdiffs Wdiffs Patch New Old Previous File Next File