< prev index next >

src/share/vm/memory/universe.cpp

Print this page
rev 7280 : 8064457: Introduce compressed oops mode "disjoint base" and improve compressed heap handling.


 674     SymbolTable::create_table();
 675     StringTable::create_table();
 676     ClassLoader::create_package_info_table();
 677 
 678     if (DumpSharedSpaces) {
 679       MetaspaceShared::prepare_for_dumping();
 680     }
 681   }
 682 
 683   return JNI_OK;
 684 }
 685 
 686 // Choose the heap base address and oop encoding mode
 687 // when compressed oops are used:
 688 // Unscaled  - Use 32-bits oops without encoding when
 689 //     NarrowOopHeapBaseMin + heap_size < 4Gb
 690 // ZeroBased - Use zero based compressed oops with encoding when
 691 //     NarrowOopHeapBaseMin + heap_size < 32Gb
 692 // HeapBased - Use compressed oops with heap base + encoding.
 693 
 694 // 4Gb
 695 static const uint64_t UnscaledOopHeapMax = (uint64_t(max_juint) + 1);
 696 // 32Gb
 697 // OopEncodingHeapMax == UnscaledOopHeapMax << LogMinObjAlignmentInBytes;
 698 
// Returns the preferred base address at which to try reserving a heap of
// heap_size bytes for the given compressed-oop mode, or NULL when any
// address will do.  On LP64 with UseCompressedOops it also pre-sets
// Universe::narrow_oop_shift()/narrow_oop_base() so that the
// ReservedHeapSpace constructor computes the correct no-access prefix;
// initialize_heap() assigns the final values afterwards.
 699 char* Universe::preferred_heap_base(size_t heap_size, size_t alignment, NARROW_OOP_MODE mode) {
 700   assert(is_size_aligned((size_t)OopEncodingHeapMax, alignment), "Must be");
 701   assert(is_size_aligned((size_t)UnscaledOopHeapMax, alignment), "Must be");
 702   assert(is_size_aligned(heap_size, alignment), "Must be");
 703 
      // Round the user-specified minimum base up to the requested alignment.
 704   uintx heap_base_min_address_aligned = align_size_up(HeapBaseMinAddress, alignment);
 705 
 706   size_t base = 0;
 707 #ifdef _LP64
 708   if (UseCompressedOops) {
 709     assert(mode == UnscaledNarrowOop  ||
 710            mode == ZeroBasedNarrowOop ||
 711            mode == HeapBasedNarrowOop, "mode is invalid");
 712     const size_t total_size = heap_size + heap_base_min_address_aligned;
 713     // Return specified base for the first request.
 714     if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
 715       base = heap_base_min_address_aligned;
 716 
 717     // If the total size is small enough to allow UnscaledNarrowOop then
 718     // just use UnscaledNarrowOop.
 719     } else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop)) {
 720       if ((total_size <= UnscaledOopHeapMax) && (mode == UnscaledNarrowOop) &&
 721           (Universe::narrow_oop_shift() == 0)) {
 722         // Use 32-bits oops without encoding and
 723         // place heap's top on the 4Gb boundary
 724         base = (UnscaledOopHeapMax - heap_size);
 725       } else {
 726         // Can't reserve with NarrowOopShift == 0
 727         Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
 728 
          // Precedence note: '&&' binds tighter than '||', so this condition
          // reads as (mode == UnscaledNarrowOop) ||
          // ((mode == ZeroBasedNarrowOop) && (total_size <= UnscaledOopHeapMax)).
 729         if (mode == UnscaledNarrowOop ||
 730             mode == ZeroBasedNarrowOop && total_size <= UnscaledOopHeapMax) {
 731 
 732           // Use zero based compressed oops with encoding and
 733           // place heap's top on the 32Gb boundary in case
 734           // total_size > 4Gb or failed to reserve below 4Gb.
 735           uint64_t heap_top = OopEncodingHeapMax;
 736 
 737           // For small heaps, save some space for compressed class pointer
 738           // space so it can be decoded with no base.
 739           if (UseCompressedClassPointers && !UseSharedSpaces &&
 740               OopEncodingHeapMax <= 32*G) {
 741 
 742             uint64_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
 743             assert(is_size_aligned((size_t)OopEncodingHeapMax-class_space,
 744                    alignment), "difference must be aligned too");
 745             uint64_t new_top = OopEncodingHeapMax-class_space;
 746 
 747             if (total_size <= new_top) {
 748               heap_top = new_top;
 749             }
 750           }
 751 
 752           // Align base to the adjusted top of the heap
 753           base = heap_top - heap_size;
 754         }
 755       }
 756     } else {
 757       // UnscaledNarrowOop encoding didn't work, and no base was found for ZeroBasedOops or
 758       // HeapBasedNarrowOop encoding was requested.  So, can't reserve below 32Gb.
 759       Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
 760     }
 761 
 762     // Set narrow_oop_base and narrow_oop_use_implicit_null_checks
 763     // used in ReservedHeapSpace() constructors.
 764     // The final values will be set in initialize_heap() below.
 765     if ((base != 0) && ((base + heap_size) <= OopEncodingHeapMax)) {
 766       // Use zero based compressed oops
 767       Universe::set_narrow_oop_base(NULL);
 768       // Don't need guard page for implicit checks in indexed
 769       // addressing mode with zero based Compressed Oops.
 770       Universe::set_narrow_oop_use_implicit_null_checks(true);
 771     } else {
 772       // Set to a non-NULL value so the ReservedSpace ctor computes
 773       // the correct no-access prefix.
 774       // The final value will be set in initialize_heap() below.
 775       Universe::set_narrow_oop_base((address)UnscaledOopHeapMax);
 776 #if defined(_WIN64) || defined(AIX)
 777       if (UseLargePages) {
 778         // Cannot allocate guard pages for implicit checks in indexed
 779         // addressing mode when large pages are specified on windows.
 780         Universe::set_narrow_oop_use_implicit_null_checks(false);
 781       }
 782 #endif //  _WIN64
 783     }
 784   }
 785 #endif
 786 
 787   assert(is_ptr_aligned((char*)base, alignment), "Must be");
 788   return (char*)base; // also return NULL (don't care) for 32-bit VM
 789 }
 790 
// Creates the CollectedHeap selected by the GC flags, calls its initialize(),
// then (LP64 + UseCompressedOops only) fixes up the narrow-oop base and shift
// to match the address range that was actually reserved.
// Returns JNI_OK on success, or the heap's initialization status on failure.
 791 jint Universe::initialize_heap() {
 792 
 793   if (UseParallelGC) {
 794 #if INCLUDE_ALL_GCS
 795     Universe::_collectedHeap = new ParallelScavengeHeap();
 796 #else  // INCLUDE_ALL_GCS
 797     fatal("UseParallelGC not supported in this VM.");
 798 #endif // INCLUDE_ALL_GCS
 799 
 800   } else if (UseG1GC) {
 801 #if INCLUDE_ALL_GCS
 802     G1CollectorPolicy* g1p = new G1CollectorPolicy();
 803     g1p->initialize_all();
 804     G1CollectedHeap* g1h = new G1CollectedHeap(g1p);
 805     Universe::_collectedHeap = g1h;
 806 #else  // INCLUDE_ALL_GCS
 807     fatal("UseG1GC not supported in java kernel vm.");
 808 #endif // INCLUDE_ALL_GCS
 809 
 810   } else {

// NOTE(review): lines 811-823 (creation of 'gc_policy' for the
// GenCollectedHeap case) are elided from this view.

 824     gc_policy->initialize_all();
 825 
 826     Universe::_collectedHeap = new GenCollectedHeap(gc_policy);
 827   }
 828 
 829   ThreadLocalAllocBuffer::set_max_size(Universe::heap()->max_tlab_size());
 830 
 831   jint status = Universe::heap()->initialize();
 832   if (status != JNI_OK) {
 833     return status;
 834   }
 835 
 836 #ifdef _LP64
 837   if (UseCompressedOops) {
 838     // Subtract a page because something can get allocated at heap base.
 839     // This also makes implicit null checking work, because the
 840     // memory+1 page below heap_base needs to cause a signal.
 841     // See needs_explicit_null_check.
 842     // Only set the heap base for compressed oops because it indicates
 843     // compressed oops for pstack code.
 844     if (((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax)) {
 845       // Can't reserve heap below 32Gb.
 846       // keep the Universe::narrow_oop_base() set in Universe::reserve_heap()
 847       Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
 848 #ifdef AIX
 849       // There is no protected page before the heap. This assures all oops
 850       // are decoded so that NULL is preserved, so this page will not be accessed.
 851       Universe::set_narrow_oop_use_implicit_null_checks(false);
 852 #endif
 853     } else {
      // Heap fits below 32Gb: base can be zero; the shift depends on whether
      // the heap also fits below 4Gb.
 854       Universe::set_narrow_oop_base(0);
 855 #ifdef _WIN64
 856       if (!Universe::narrow_oop_use_implicit_null_checks()) {
 857         // Don't need guard page for implicit checks in indexed addressing
 858         // mode with zero based Compressed Oops.
 859         Universe::set_narrow_oop_use_implicit_null_checks(true);
 860       }
 861 #endif //  _WIN64
 862       if((uint64_t)Universe::heap()->reserved_region().end() > UnscaledOopHeapMax) {
 863         // Can't reserve heap below 4Gb.
 864         Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
 865       } else {
 866         Universe::set_narrow_oop_shift(0);
 867       }

 868     }
 869 
 870     Universe::set_narrow_ptrs_base(Universe::narrow_oop_base());
 871 
 872     if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
 873       Universe::print_compressed_oops_mode();
 874     }
 875   }
 876   // Universe::narrow_oop_base() is one page below the heap.
 877   assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() -
 878          os::vm_page_size()) ||
 879          Universe::narrow_oop_base() == NULL, "invalid value");
 880   assert(Universe::narrow_oop_shift() == LogMinObjAlignmentInBytes ||
 881          Universe::narrow_oop_shift() == 0, "invalid value");
 882 #endif
 883 
 884   // We will never reach the CATCH below since Exceptions::_throw will cause
 885   // the VM to exit if an exception is thrown during initialization
 886 
 887   if (UseTLAB) {
 888     assert(Universe::heap()->supports_tlab_allocation(),
 889            "Should support thread-local allocation buffers");
 890     ThreadLocalAllocBuffer::startup_initialization();
 891   }
 892   return JNI_OK;
 893 }
 894 
// Prints the current compressed-oops configuration (heap address and size,
// mode name, and the non-zero base/shift if set) to tty.  Called from
// initialize_heap() when PrintCompressedOopsMode or verbose tracing is on.
 895 void Universe::print_compressed_oops_mode() {
 896   tty->cr();
 897   tty->print("heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
 898               Universe::heap()->base(), Universe::heap()->reserved_region().byte_size()/M);
 899 
 900   tty->print(", Compressed Oops mode: %s", narrow_oop_mode_to_string(narrow_oop_mode()));
 901 
 902   if (Universe::narrow_oop_base() != 0) {
 903     tty->print(":" PTR_FORMAT, Universe::narrow_oop_base());
 904   }
 905 
 906   if (Universe::narrow_oop_shift() != 0) {
 907     tty->print(", Oop shift amount: %d", Universe::narrow_oop_shift());
 908   }
 909 

 910   tty->cr();
 911   tty->cr();
 912 }
 913 
 914 // Reserve the Java heap, which is now the same for all GCs.















































// Reserve the space for the Java heap.  Tries, in order, a base suitable for
// unscaled oops, then zero-based, then heap-based encoding; exits the VM if
// no reservation succeeds.  For compressed oops, pre-sets narrow_oop_base to
// one page below the reserved heap (initialize_heap() assigns the final value).
 915 ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {

 916   assert(alignment <= Arguments::conservative_max_heap_alignment(),
 917       err_msg("actual alignment "SIZE_FORMAT" must be within maximum heap alignment "SIZE_FORMAT,
 918           alignment, Arguments::conservative_max_heap_alignment()));

 919   size_t total_reserved = align_size_up(heap_size, alignment);
 920   assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
 921       "heap size is too big for compressed oops");
 922 
 923   bool use_large_pages = UseLargePages && is_size_aligned(alignment, os::large_page_size());
 924   assert(!UseLargePages
 925       || UseParallelGC
 926       || use_large_pages, "Wrong alignment to use large pages");
 927 
 928   char* addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::UnscaledNarrowOop);
 929 
 930   ReservedHeapSpace total_rs(total_reserved, alignment, use_large_pages, addr);
 931 
 932   if (UseCompressedOops) {
 933     if (addr != NULL && !total_rs.is_reserved()) {
 934       // Failed to reserve at specified address - the requested memory
 935       // region is taken already, for example, by 'java' launcher.
 936       // Try again to reserve the heap higher.
 937       addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::ZeroBasedNarrowOop);
 938 
 939       ReservedHeapSpace total_rs0(total_reserved, alignment,
 940           use_large_pages, addr);
 941 
 942       if (addr != NULL && !total_rs0.is_reserved()) {
 943         // Failed to reserve at specified address again - give up.
        // HeapBasedNarrowOop mode returns NULL (any address will do) but
        // still pre-sets shift/base for heap-based encoding as a side effect.
 944         addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::HeapBasedNarrowOop);
 945         assert(addr == NULL, "");
 946 
 947         ReservedHeapSpace total_rs1(total_reserved, alignment,
 948             use_large_pages, addr);
 949         total_rs = total_rs1;
 950       } else {
 951         total_rs = total_rs0;
 952       }
 953     }
 954   }
 955 
 956   if (!total_rs.is_reserved()) {
 957     vm_exit_during_initialization(err_msg("Could not reserve enough space for " SIZE_FORMAT "KB object heap", total_reserved/K));
        // NOTE(review): presumably unreachable -- vm_exit_during_initialization
        // should not return; the return below satisfies the compiler. Confirm.
 958     return total_rs;
 959   }
 960 
 961   if (UseCompressedOops) {
 962     // Universe::initialize_heap() will reset this to NULL if unscaled
 963     // or zero-based narrow oops are actually used.
 964     address base = (address)(total_rs.base() - os::vm_page_size());
 965     Universe::set_narrow_oop_base(base);
 966   }
 967   return total_rs;
 968 }
 969 
 970 
 971 // It's the caller's responsibility to ensure glitch-freedom
 972 // (if required).
 973 void Universe::update_heap_info_at_gc() {
 974   _heap_capacity_at_last_gc = heap()->capacity();
 975   _heap_used_at_last_gc     = heap()->used();
 976 }
 977 
 978 
 979 const char* Universe::narrow_oop_mode_to_string(Universe::NARROW_OOP_MODE mode) {
 980   switch (mode) {
 981     case UnscaledNarrowOop:
 982       return "32-bit";
 983     case ZeroBasedNarrowOop:
 984       return "Zero based";


 985     case HeapBasedNarrowOop:
 986       return "Non-zero based";
 987   }
 988 
 989   ShouldNotReachHere();
 990   return "";
 991 }
 992 
 993 
 994 Universe::NARROW_OOP_MODE Universe::narrow_oop_mode() {




 995   if (narrow_oop_base() != 0) {
 996     return HeapBasedNarrowOop;
 997   }
 998 
 999   if (narrow_oop_shift() != 0) {
1000     return ZeroBasedNarrowOop;
1001   }
1002 
1003   return UnscaledNarrowOop;
1004 }
1005 
1006 
// Phase-2 initialization: populate the universe with the initial objects
// (Universe::genesis).  Per the comment in initialize_heap(), an exception
// thrown during initialization causes the VM to exit, so CATCH is not reached.
 1007 void universe2_init() {
 1008   EXCEPTION_MARK;
 1009   Universe::genesis(CATCH);
 1010 }
1011 
1012 
1013 bool universe_post_init() {
1014   assert(!is_init_completed(), "Error: initialization not yet completed!");




 674     SymbolTable::create_table();
 675     StringTable::create_table();
 676     ClassLoader::create_package_info_table();
 677 
 678     if (DumpSharedSpaces) {
 679       MetaspaceShared::prepare_for_dumping();
 680     }
 681   }
 682 
 683   return JNI_OK;
 684 }
 685 
 686 // Choose the heap base address and oop encoding mode
 687 // when compressed oops are used:
 688 // Unscaled  - Use 32-bits oops without encoding when
 689 //     NarrowOopHeapBaseMin + heap_size < 4Gb
 690 // ZeroBased - Use zero based compressed oops with encoding when
 691 //     NarrowOopHeapBaseMin + heap_size < 32Gb
// DisjointBase - Use compressed oops with a non-zero heap base chosen so
//     that the base bits do not overlap the shifted oop (see
//     get_attach_addresses_for_disjoint_mode below); introduced by 8064457.
 692 // HeapBased - Use compressed oops with heap base + encoding.
 693 

































































































// Creates the CollectedHeap selected by the GC flags, calls its initialize(),
// then (LP64 + UseCompressedOops only) derives the narrow-oop shift and base
// from the address range that was actually reserved.
// Returns JNI_OK on success, or the heap's initialization status on failure.
 694 jint Universe::initialize_heap() {
 695 
 696   if (UseParallelGC) {
 697 #if INCLUDE_ALL_GCS
 698     Universe::_collectedHeap = new ParallelScavengeHeap();
 699 #else  // INCLUDE_ALL_GCS
 700     fatal("UseParallelGC not supported in this VM.");
 701 #endif // INCLUDE_ALL_GCS
 702 
 703   } else if (UseG1GC) {
 704 #if INCLUDE_ALL_GCS
 705     G1CollectorPolicy* g1p = new G1CollectorPolicy();
 706     g1p->initialize_all();
 707     G1CollectedHeap* g1h = new G1CollectedHeap(g1p);
 708     Universe::_collectedHeap = g1h;
 709 #else  // INCLUDE_ALL_GCS
 710     fatal("UseG1GC not supported in java kernel vm.");
 711 #endif // INCLUDE_ALL_GCS
 712 
 713   } else {

// NOTE(review): lines 714-726 (creation of 'gc_policy' for the
// GenCollectedHeap case) are elided from this view.

 727     gc_policy->initialize_all();
 728 
 729     Universe::_collectedHeap = new GenCollectedHeap(gc_policy);
 730   }
 731 
 732   ThreadLocalAllocBuffer::set_max_size(Universe::heap()->max_tlab_size());
 733 
 734   jint status = Universe::heap()->initialize();
 735   if (status != JNI_OK) {
 736     return status;
 737   }
 738 
 739 #ifdef _LP64
 740   if (UseCompressedOops) {
 741     // Subtract a page because something can get allocated at heap base.
 742     // This also makes implicit null checking work, because the
 743     // memory+1 page below heap_base needs to cause a signal.
 744     // See needs_explicit_null_check.
 745     // Only set the heap base for compressed oops because it indicates
 746     // compressed oops for pstack code.
 747     if ((uint64_t)Universe::heap()->reserved_region().end() > UnscaledOopHeapMax) {
 748       // Didn't reserve heap below 4Gb.  Must shift.
 749       Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
 750     }
 751     if ((uint64_t)Universe::heap()->reserved_region().end() <= OopEncodingHeapMax) {
 752       // Did reserve heap below 32Gb. Can use base == 0;
 753       Universe::set_narrow_oop_base(0);
 754     }
 755 
 756     Universe::set_narrow_ptrs_base(Universe::narrow_oop_base());
 757 
 758     if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
 759       Universe::print_compressed_oops_mode();
 760     }
 761   }
 762   // Universe::narrow_oop_base() is one page below the heap.
 763   assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() -
 764          os::vm_page_size()) ||
 765          Universe::narrow_oop_base() == NULL, "invalid value");
 766   assert(Universe::narrow_oop_shift() == LogMinObjAlignmentInBytes ||
 767          Universe::narrow_oop_shift() == 0, "invalid value");
 768 #endif
 769 
 770   // We will never reach the CATCH below since Exceptions::_throw will cause
 771   // the VM to exit if an exception is thrown during initialization
 772 
 773   if (UseTLAB) {
 774     assert(Universe::heap()->supports_tlab_allocation(),
 775            "Should support thread-local allocation buffers");
 776     ThreadLocalAllocBuffer::startup_initialization();
 777   }
 778   return JNI_OK;
 779 }
 780 
// Prints the current compressed-oops configuration (heap address and size,
// mode name, base, shift, and whether the heap lacks a protected page in
// front) to tty.
 781 void Universe::print_compressed_oops_mode() {
 782   tty->cr();
 783   tty->print("heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
 784               Universe::heap()->base(), Universe::heap()->reserved_region().byte_size()/M);
 785 
 786   tty->print(", Compressed Oops mode: %s", narrow_oop_mode_to_string(narrow_oop_mode()));
 787 
 788   if (Universe::narrow_oop_base() != 0) {
 789     tty->print(": " PTR_FORMAT, Universe::narrow_oop_base());
 790   }
 791 
 792   if (Universe::narrow_oop_shift() != 0) {
 793     tty->print(", Oop shift amount: %d", Universe::narrow_oop_shift());
 794   }
 795 
 796   if (!Universe::narrow_oop_use_implicit_null_checks()) {
 797     tty->print(", no protected page in front of the heap");
 798   }
 799 
 800   tty->cr();
 801   tty->cr();
 802 }
 803 
 804 #define SIZE_64K  ((uint64_t)       0x10000ULL)
// NOTE(review): SIZE_256M appears unused in this chunk of the file.
 805 #define SIZE_256M ((uint64_t)    0x10000000ULL)
 806 #define SIZE_32G  ((uint64_t)   0x800000000ULL)
 807 
 808 // Helper for heap allocation. Returns an array with addresses
 809 // (OS-specific) which are suited for disjoint base mode. Array is
 810 // NULL terminated.
 811 static char** get_attach_addresses_for_disjoint_mode() {
 812   static uintptr_t addresses[] = {
 813 #ifdef _LP64
 814      2 * SIZE_32G,
 815      3 * SIZE_32G,
 816      4 * SIZE_32G,
 817      8 * SIZE_32G,
 818     10 * SIZE_32G,
 819      1 * SIZE_64K * SIZE_32G,
 820      2 * SIZE_64K * SIZE_32G,
 821      3 * SIZE_64K * SIZE_32G,
 822      4 * SIZE_64K * SIZE_32G,
 823     16 * SIZE_64K * SIZE_32G,
 824     32 * SIZE_64K * SIZE_32G,
 825     34 * SIZE_64K * SIZE_32G,
 826 #endif
 827     0
 828   };
 829 
 830   // Skip addresses below OopEncodingHeapMax or HeapBaseMinAddress.
 831   // This assumes the array is sorted.
 832   uint i = 0;
 833   while (addresses[i] != 0 &&
 834          (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
 835     i++;
 836   }
 837   uint start = i;
 838 
 839   // Avoid more steps than requested.
    // NOTE(review): this truncates the function-local *static* array in
    // place, so any later call would see the shortened list; apparently
    // intended for one-shot use during startup -- confirm.
 840   i = 0;
 841   while (addresses[start+i] != 0) {
 842     if (i == HeapSearchSteps) {
 843       addresses[start+i] = 0;
 844       break;
 845     }
 846     i++;
 847   }
 848 
 849   return (char**) &addresses[start];
 850 }
 851 
// Reserve the space for the Java heap.  Probes, in order: the user-given
// HeapBaseMinAddress, a range suitable for unscaled oops, a zero-based range
// below 32Gb, a set of disjoint-base friendly addresses, and finally any
// address.  A successfully probed range is released and then re-reserved by
// ReservedHeapSpace.  Exits the VM if nothing can be reserved.
 852 ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
 853 
 854   assert(alignment <= Arguments::conservative_max_heap_alignment(),
 855       err_msg("actual alignment "SIZE_FORMAT" must be within maximum heap alignment "SIZE_FORMAT,
 856           alignment, Arguments::conservative_max_heap_alignment()));
 857 
 858   size_t total_reserved = align_size_up(heap_size, alignment);
 859   assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
 860       "heap size is too big for compressed oops");
 861 
 862   bool use_large_pages = UseLargePages && is_size_aligned(alignment, os::large_page_size());
 863   assert(!UseLargePages
 864       || UseParallelGC
 865       || use_large_pages, "Wrong alignment to use large pages");
 866 
 867   // Address where to allocate the heap. NULL: anywhere.
 868   char* addr = NULL;
 869   size_t disjoint_noaccess_prefix = 0;
 870 
 871   if (UseCompressedOops) {
 872     // Try to get a heap by:
 873     //  0) if HeapBaseMinAddress is set, try this address first.
 874     //  1) get heap for unscaled (base = 0, shift = 0)
 875     //  2) failing that, get heap for zerobased (base = 0, shift != 0)
 876     //  3) failing that, get heap for disjointbase  (base != 0, shift != 0)
 877     //  4) failing that, any heap will do.
 878 
 879     // Loop over compressed oop modes; try to obtain a fitting memory range;
 880     // if success, release it again and let ReservedHeapSpace attempt to
 881     // allocate in the same range.
 882     for (int i = 0; i <= 4; i++) {
 883       disjoint_noaccess_prefix = 0;
 884       switch (i) {
 885       case 0:
 886         // Attempt to alloc at user-given address.
 887         if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
 888           addr = os::attempt_reserve_memory_at(total_reserved, (char *)HeapBaseMinAddress);
 889           if (is_disjoint_heap_base_address((address)addr)) {
 890             disjoint_noaccess_prefix = ReservedHeapSpace::noaccess_prefix_size(alignment);
 891           }
 892         }
 893         break;
 894       case 1:
 895         // Attempt to alloc for unscaled.
 896         addr = os::attempt_reserve_memory_in_range(total_reserved, alignment,
 897                                                    (char*) HeapBaseMinAddress,
 898                                                    (char*) UnscaledOopHeapMax,
 899                                                    HeapSearchSteps);
 900         break;
 901       case 2:
 902         {
 903           // zerobased: Attempt to allocate in the lower 32G.
 904           // But leave room for the compressed class pointers.
 905           char* zerobased_max = (char*)OopEncodingHeapMax;
 906 
 907           // For small heaps, save some space for compressed class pointer
 908           // space so it can be decoded with no base.
 909           if (UseCompressedClassPointers && !UseSharedSpaces &&
 910               OopEncodingHeapMax <= 32*G) {
 911             uint64_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
 912             assert(is_size_aligned((size_t)OopEncodingHeapMax-class_space,
 913                    alignment), "difference must be aligned too");
 914             zerobased_max = (char*) OopEncodingHeapMax - class_space;
 915           }
 916 
 917           addr = os::attempt_reserve_memory_in_range(total_reserved, alignment,
 918                                                      (char*) MAX2((char*)UnscaledOopHeapMax, (char*)HeapBaseMinAddress),
 919                                                      (char*) zerobased_max,
 920                                                      HeapSearchSteps);
 921         }
 922         break;
 923       case 3:
 924         // disjointbase. Here we just try a bushel of OS-dependent known
 925         // disjoint-based friendly addresses.
 926         {
 927           char** addresses = get_attach_addresses_for_disjoint_mode();
 928           addr = os::attempt_reserve_memory_at_multiple(total_reserved, addresses);
 929           disjoint_noaccess_prefix = ReservedHeapSpace::noaccess_prefix_size(alignment);
 930         }
 931         break;
 932       case 4:
 933         addr = 0;
 934         break;
 935       default:
 936         ShouldNotReachHere();
 937       }
 938 
 939       // If we could not find space for the current mode, try the next mode.
 940       if (!addr && i < 4) {
 941         continue;
 942       }
 943 
 944       // If we did find space, release space; ReservedHeapSpace will allocate
 945       // again.
      // NOTE(review): releasing before re-reserving is inherently racy --
      // another mapping may appear in between, making the ReservedHeapSpace
      // attempt fail; the NULL-addr fallback below covers that case.
 946       if (addr) {
 947         os::release_memory(addr, total_reserved);
 948         break;  // Quit the for loop.
 949       }
 950 
 951     } // for loop
 952   }
 953 
 954   // now create the space
 955   ReservedHeapSpace total_rs(total_reserved, alignment,
 956                              use_large_pages, addr + disjoint_noaccess_prefix);
 957 
 958   if (addr != NULL && !total_rs.is_reserved()) {
 959     // Try arbitrary position.
 960     ReservedHeapSpace total_rs1(total_reserved, alignment, use_large_pages, NULL);
 961     disjoint_noaccess_prefix = 0;
 962     total_rs = total_rs1;
 963   }
 964 
 965   if (total_rs.is_reserved()) {
 966     // we are good.
 967 
 968     if (UseCompressedOops) {
 969       // Universe::initialize_heap() will reset this to NULL if unscaled
 970       // or zero-based narrow oops are actually used.
 971       // SAPJVM GL 2014-09-22
 972       // Else heap start and base MUST differ, so that NULL can be encoded unambiguously.
 973       address base = (address)(total_rs.base() - ReservedHeapSpace::noaccess_prefix_size(alignment));
 974       Universe::set_narrow_oop_base(base);
 975     }
 976 
 977     return total_rs;
 978   }
 979 
 980   vm_exit_during_initialization(
 981     err_msg("Could not reserve enough space for " SIZE_FORMAT "KB object heap",
 982             total_reserved/K));
 983 
 984   // satisfy compiler
 985   ShouldNotReachHere();
 986   return ReservedHeapSpace(0, 0, false, 0);
 987 }
 988 
 989 
 990 // It's the caller's responsibility to ensure glitch-freedom
 991 // (if required).
 992 void Universe::update_heap_info_at_gc() {
 993   _heap_capacity_at_last_gc = heap()->capacity();
 994   _heap_used_at_last_gc     = heap()->used();
 995 }
 996 
 997 
 998 const char* Universe::narrow_oop_mode_to_string(Universe::NARROW_OOP_MODE mode) {
 999   switch (mode) {
1000     case UnscaledNarrowOop:
1001       return "32-bit";
1002     case ZeroBasedNarrowOop:
1003       return "Zero based";
1004     case DisjointBaseNarrowOop:
1005       return "Non-zero disjoint base";
1006     case HeapBasedNarrowOop:
1007       return "Non-zero based";
1008   }
1009 
1010   ShouldNotReachHere();
1011   return "";
1012 }
1013 
1014 
1015 Universe::NARROW_OOP_MODE Universe::narrow_oop_mode() {
1016   if (narrow_oop_base_disjoint()) {
1017     return DisjointBaseNarrowOop;
1018   }
1019 
1020   if (narrow_oop_base() != 0) {
1021     return HeapBasedNarrowOop;
1022   }
1023 
1024   if (narrow_oop_shift() != 0) {
1025     return ZeroBasedNarrowOop;
1026   }
1027 
1028   return UnscaledNarrowOop;
1029 }
1030 
1031 
// Phase-2 initialization: populate the universe with the initial objects
// (Universe::genesis).  Per the comment in initialize_heap(), an exception
// thrown during initialization causes the VM to exit, so CATCH is not reached.
 1032 void universe2_init() {
 1033   EXCEPTION_MARK;
 1034   Universe::genesis(CATCH);
 1035 }
1036 
1037 
1038 bool universe_post_init() {
1039   assert(!is_init_completed(), "Error: initialization not yet completed!");


< prev index next >