src/share/vm/memory/universe.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File hsx-rt.8007074 Sdiff src/share/vm/memory

src/share/vm/memory/universe.cpp

Print this page




 664     StringTable::create_table();
 665     ClassLoader::create_package_info_table();
 666   }
 667 
 668   return JNI_OK;
 669 }
 670 
 671 // Choose the heap base address and oop encoding mode
 672 // when compressed oops are used:
 673 // Unscaled  - Use 32-bits oops without encoding when
 674 //     NarrowOopHeapBaseMin + heap_size < 4Gb
 675 // ZeroBased - Use zero based compressed oops with encoding when
 676 //     NarrowOopHeapBaseMin + heap_size < 32Gb
 677 // HeapBased - Use compressed oops with heap base + encoding.
 678 
 679 // 4Gb
 680 static const uint64_t NarrowOopHeapMax = (uint64_t(max_juint) + 1);
 681 // 32Gb
 682 // OopEncodingHeapMax == NarrowOopHeapMax << LogMinObjAlignmentInBytes;
 683 
 684 char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {






 685   size_t base = 0;
 686 #ifdef _LP64
 687   if (UseCompressedOops) {
 688     assert(mode == UnscaledNarrowOop  ||
 689            mode == ZeroBasedNarrowOop ||
 690            mode == HeapBasedNarrowOop, "mode is invalid");
 691     const size_t total_size = heap_size + HeapBaseMinAddress;
 692     // Return specified base for the first request.
 693     if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
 694       base = HeapBaseMinAddress;
 695 
 696     // If the total size is small enough to allow UnscaledNarrowOop then
 697     // just use UnscaledNarrowOop.
 698     } else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop)) {
 699       if ((total_size <= NarrowOopHeapMax) && (mode == UnscaledNarrowOop) &&
 700           (Universe::narrow_oop_shift() == 0)) {
 701         // Use 32-bits oops without encoding and
 702         // place heap's top on the 4Gb boundary
 703         base = (NarrowOopHeapMax - heap_size);
 704       } else {
 705         // Can't reserve with NarrowOopShift == 0
 706         Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
 707         if (mode == UnscaledNarrowOop ||
 708             mode == ZeroBasedNarrowOop && total_size <= NarrowOopHeapMax) {
 709           // Use zero based compressed oops with encoding and
 710           // place heap's top on the 32Gb boundary in case
 711           // total_size > 4Gb or failed to reserve below 4Gb.
 712           base = (OopEncodingHeapMax - heap_size);
 713         }
 714       }


 725       // Use zero based compressed oops
 726       Universe::set_narrow_oop_base(NULL);
 727       // Don't need guard page for implicit checks in indexed
 728       // addressing mode with zero based Compressed Oops.
 729       Universe::set_narrow_oop_use_implicit_null_checks(true);
 730     } else {
 731       // Set to a non-NULL value so the ReservedSpace ctor computes
 732       // the correct no-access prefix.
 733       // The final value will be set in initialize_heap() below.
 734       Universe::set_narrow_oop_base((address)NarrowOopHeapMax);
 735 #ifdef _WIN64
 736       if (UseLargePages) {
 737         // Cannot allocate guard pages for implicit checks in indexed
 738         // addressing mode when large pages are specified on windows.
 739         Universe::set_narrow_oop_use_implicit_null_checks(false);
 740       }
 741 #endif //  _WIN64
 742     }
 743   }
 744 #endif


 745   return (char*)base; // also return NULL (don't care) for 32-bit VM
 746 }
 747 
 748 jint Universe::initialize_heap() {
 749 
 750   if (UseParallelGC) {
 751 #if INCLUDE_ALL_GCS
 752     Universe::_collectedHeap = new ParallelScavengeHeap();
 753 #else  // INCLUDE_ALL_GCS
 754     fatal("UseParallelGC not supported in this VM.");
 755 #endif // INCLUDE_ALL_GCS
 756 
 757   } else if (UseG1GC) {
 758 #if INCLUDE_ALL_GCS
 759     G1CollectorPolicy* g1p = new G1CollectorPolicy();
 760     G1CollectedHeap* g1h = new G1CollectedHeap(g1p);
 761     Universe::_collectedHeap = g1h;
 762 #else  // INCLUDE_ALL_GCS
 763     fatal("UseG1GC not supported in java kernel vm.");
 764 #endif // INCLUDE_ALL_GCS


 850          Universe::narrow_oop_shift() == 0, "invalid value");
 851 #endif
 852 
 853   // We will never reach the CATCH below since Exceptions::_throw will cause
 854   // the VM to exit if an exception is thrown during initialization
 855 
 856   if (UseTLAB) {
 857     assert(Universe::heap()->supports_tlab_allocation(),
 858            "Should support thread-local allocation buffers");
 859     ThreadLocalAllocBuffer::startup_initialization();
 860   }
 861   return JNI_OK;
 862 }
 863 
 864 
 865 // Reserve the Java heap, which is now the same for all GCs.
     // Rounds heap_size up to 'alignment', asks preferred_heap_base() for a
     // base address suited to the strongest compressed-oop mode, and retries
     // with progressively weaker modes (Unscaled -> ZeroBased -> HeapBased)
     // if the OS cannot reserve at the requested address. Exits the VM when
     // no reservation succeeds at all.
 866 ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
 867   size_t total_reserved = align_size_up(heap_size, alignment);
 868   assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
 869       "heap size is too big for compressed oops");
     // First attempt: base chosen for unscaled (32-bit, shift == 0) narrow oops.
 870   char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
 871 
 872   ReservedHeapSpace total_rs(total_reserved, alignment, UseLargePages, addr);







 873 
 874   if (UseCompressedOops) {
 875     if (addr != NULL && !total_rs.is_reserved()) {
 876       // Failed to reserve at specified address - the requested memory
 877       // region is taken already, for example, by 'java' launcher.
 878       // Try again to reserve heap higher.
 879       addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
 880 
 881       ReservedHeapSpace total_rs0(total_reserved, alignment,
 882                                   UseLargePages, addr);
 883 
 884       if (addr != NULL && !total_rs0.is_reserved()) {
 885         // Failed to reserve at specified address again - give up.
 886         addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
 887         assert(addr == NULL, "");  // HeapBased mode requests no specific base; the OS chooses.
 888 
 889         ReservedHeapSpace total_rs1(total_reserved, alignment,
 890                                     UseLargePages, addr);
 891         total_rs = total_rs1;
 892       } else {
 893         total_rs = total_rs0;
 894       }
 895     }
 896   }
 897 
     // All attempts (at most three) failed: report the requested size and exit.
 898   if (!total_rs.is_reserved()) {
 899     vm_exit_during_initialization(err_msg("Could not reserve enough space for " SIZE_FORMAT "KB object heap", total_reserved/K));
 900     return total_rs;
 901   }
 902 
 903   if (UseCompressedOops) {
 904     // Universe::initialize_heap() will reset this to NULL if unscaled
 905     // or zero-based narrow oops are actually used.
     // Set to a non-NULL value (one page below the reserved base) so later
     // code computes the correct no-access prefix for implicit null checks.
 906     address base = (address)(total_rs.base() - os::vm_page_size());
 907     Universe::set_narrow_oop_base(base);
 908   }
 909   return total_rs;
 910 }




 664     StringTable::create_table();
 665     ClassLoader::create_package_info_table();
 666   }
 667 
 668   return JNI_OK;
 669 }
 670 
 671 // Choose the heap base address and oop encoding mode
 672 // when compressed oops are used:
 673 // Unscaled  - Use 32-bits oops without encoding when
 674 //     NarrowOopHeapBaseMin + heap_size < 4Gb
 675 // ZeroBased - Use zero based compressed oops with encoding when
 676 //     NarrowOopHeapBaseMin + heap_size < 32Gb
 677 // HeapBased - Use compressed oops with heap base + encoding.
 678 
 679 // 4Gb
 680 static const uint64_t NarrowOopHeapMax = (uint64_t(max_juint) + 1);
 681 // 32Gb
 682 // OopEncodingHeapMax == NarrowOopHeapMax << LogMinObjAlignmentInBytes;
 683 
 684 char* Universe::preferred_heap_base(size_t heap_size, size_t alignment, NARROW_OOP_MODE mode) {
 685   assert(is_size_aligned((size_t)OopEncodingHeapMax, alignment), "Must be");
 686   assert(is_size_aligned((size_t)NarrowOopHeapMax, alignment), "Must be");
 687   assert(is_size_aligned(heap_size, alignment), "Must be");
 688 
 689   uintx heap_base_min_address_aligned = align_size_up(HeapBaseMinAddress, alignment);
 690 
 691   size_t base = 0;
 692 #ifdef _LP64
 693   if (UseCompressedOops) {
 694     assert(mode == UnscaledNarrowOop  ||
 695            mode == ZeroBasedNarrowOop ||
 696            mode == HeapBasedNarrowOop, "mode is invalid");
 697     const size_t total_size = heap_size + heap_base_min_address_aligned;
 698     // Return specified base for the first request.
 699     if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
 700       base = heap_base_min_address_aligned;
 701 
 702     // If the total size is small enough to allow UnscaledNarrowOop then
 703     // just use UnscaledNarrowOop.
 704     } else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop)) {
 705       if ((total_size <= NarrowOopHeapMax) && (mode == UnscaledNarrowOop) &&
 706           (Universe::narrow_oop_shift() == 0)) {
 707         // Use 32-bits oops without encoding and
 708         // place heap's top on the 4Gb boundary
 709         base = (NarrowOopHeapMax - heap_size);
 710       } else {
 711         // Can't reserve with NarrowOopShift == 0
 712         Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
 713         if (mode == UnscaledNarrowOop ||
 714             mode == ZeroBasedNarrowOop && total_size <= NarrowOopHeapMax) {
 715           // Use zero based compressed oops with encoding and
 716           // place heap's top on the 32Gb boundary in case
 717           // total_size > 4Gb or failed to reserve below 4Gb.
 718           base = (OopEncodingHeapMax - heap_size);
 719         }
 720       }


 731       // Use zero based compressed oops
 732       Universe::set_narrow_oop_base(NULL);
 733       // Don't need guard page for implicit checks in indexed
 734       // addressing mode with zero based Compressed Oops.
 735       Universe::set_narrow_oop_use_implicit_null_checks(true);
 736     } else {
 737       // Set to a non-NULL value so the ReservedSpace ctor computes
 738       // the correct no-access prefix.
 739       // The final value will be set in initialize_heap() below.
 740       Universe::set_narrow_oop_base((address)NarrowOopHeapMax);
 741 #ifdef _WIN64
 742       if (UseLargePages) {
 743         // Cannot allocate guard pages for implicit checks in indexed
 744         // addressing mode when large pages are specified on windows.
 745         Universe::set_narrow_oop_use_implicit_null_checks(false);
 746       }
 747 #endif //  _WIN64
 748     }
 749   }
 750 #endif
 751 
 752   assert(is_ptr_aligned((char*)base, alignment), "");
 753   return (char*)base; // also return NULL (don't care) for 32-bit VM
 754 }
 755 
 756 jint Universe::initialize_heap() {
 757 
 758   if (UseParallelGC) {
 759 #if INCLUDE_ALL_GCS
 760     Universe::_collectedHeap = new ParallelScavengeHeap();
 761 #else  // INCLUDE_ALL_GCS
 762     fatal("UseParallelGC not supported in this VM.");
 763 #endif // INCLUDE_ALL_GCS
 764 
 765   } else if (UseG1GC) {
 766 #if INCLUDE_ALL_GCS
 767     G1CollectorPolicy* g1p = new G1CollectorPolicy();
 768     G1CollectedHeap* g1h = new G1CollectedHeap(g1p);
 769     Universe::_collectedHeap = g1h;
 770 #else  // INCLUDE_ALL_GCS
 771     fatal("UseG1GC not supported in java kernel vm.");
 772 #endif // INCLUDE_ALL_GCS


 858          Universe::narrow_oop_shift() == 0, "invalid value");
 859 #endif
 860 
 861   // We will never reach the CATCH below since Exceptions::_throw will cause
 862   // the VM to exit if an exception is thrown during initialization
 863 
 864   if (UseTLAB) {
 865     assert(Universe::heap()->supports_tlab_allocation(),
 866            "Should support thread-local allocation buffers");
 867     ThreadLocalAllocBuffer::startup_initialization();
 868   }
 869   return JNI_OK;
 870 }
 871 
 872 
 873 // Reserve the Java heap, which is now the same for all GCs.
     // Rounds heap_size up to 'alignment', asks preferred_heap_base() for a
     // base address suited to the strongest compressed-oop mode, and retries
     // with progressively weaker modes (Unscaled -> ZeroBased -> HeapBased)
     // if the OS cannot reserve at the requested address. Exits the VM when
     // no reservation succeeds at all.
 874 ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
 875   size_t total_reserved = align_size_up(heap_size, alignment);
 876   assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
 877       "heap size is too big for compressed oops");

 878 
     // Large pages are only usable when the chosen alignment is a multiple of
     // the OS large-page size; otherwise fall back to regular pages.
 879   bool use_large_pages = UseLargePages && is_size_aligned(alignment, os::large_page_size());
 880   assert(!UseLargePages
 881       || UseParallelOldGC
 882       || use_large_pages, "Wrong alignment to use large pages");
 883 
     // First attempt: base chosen for unscaled (32-bit, shift == 0) narrow oops.
 884   char* addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::UnscaledNarrowOop);
 885 
 886   ReservedHeapSpace total_rs(total_reserved, alignment, use_large_pages, addr);
 887 
 888   if (UseCompressedOops) {
 889     if (addr != NULL && !total_rs.is_reserved()) {
 890       // Failed to reserve at specified address - the requested memory
 891       // region is taken already, for example, by 'java' launcher.
 892       // Try again to reserve heap higher.
 893       addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::ZeroBasedNarrowOop);
 894 
 895       ReservedHeapSpace total_rs0(total_reserved, alignment,
 896           use_large_pages, addr);
 897 
 898       if (addr != NULL && !total_rs0.is_reserved()) {
 899         // Failed to reserve at specified address again - give up.
 900         addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::HeapBasedNarrowOop);
 901         assert(addr == NULL, "");  // HeapBased mode requests no specific base; the OS chooses.
 902 
 903         ReservedHeapSpace total_rs1(total_reserved, alignment,
 904             use_large_pages, addr);
 905         total_rs = total_rs1;
 906       } else {
 907         total_rs = total_rs0;
 908       }
 909     }
 910   }
 911 
     // All attempts (at most three) failed: report the requested size and exit.
 912   if (!total_rs.is_reserved()) {
 913     vm_exit_during_initialization(err_msg("Could not reserve enough space for " SIZE_FORMAT "KB object heap", total_reserved/K));
 914     return total_rs;
 915   }
 916 
 917   if (UseCompressedOops) {
 918     // Universe::initialize_heap() will reset this to NULL if unscaled
 919     // or zero-based narrow oops are actually used.
     // Set to a non-NULL value (one page below the reserved base) so later
     // code computes the correct no-access prefix for implicit null checks.
 920     address base = (address)(total_rs.base() - os::vm_page_size());
 921     Universe::set_narrow_oop_base(base);
 922   }
 923   return total_rs;
 924 }


src/share/vm/memory/universe.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File