
src/share/vm/memory/universe.cpp

rev 7280 : 8064457: Introduce compressed oops mode "disjoint base" and improve compressed heap handling.

@@ -689,107 +689,10 @@
 //     NarrowOopHeapBaseMin + heap_size < 4Gb
 // ZeroBased - Use zero based compressed oops with encoding when
 //     NarrowOopHeapBaseMin + heap_size < 32Gb
 // HeapBased - Use compressed oops with heap base + encoding.
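
The three modes above boil down to one decode formula with different base/shift
values. A minimal sketch, not VM code: 'base' and 'shift' stand in for
narrow_oop_base() and narrow_oop_shift(), and shift == 3 assumes the default
8-byte object alignment.

    #include <stdint.h>

    // Unscaled:  base == 0, shift == 0  ->  the narrow value is the oop.
    // ZeroBased: base == 0, shift == 3  ->  oop = narrow << 3 (covers 32Gb).
    // HeapBased: base != 0, shift == 3  ->  oop = base + (narrow << 3).
    static inline uintptr_t decode_oop(uint32_t narrow, uintptr_t base, int shift) {
      return base + ((uintptr_t)narrow << shift);
    }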
 
-// 4Gb
-static const uint64_t UnscaledOopHeapMax = (uint64_t(max_juint) + 1);
-// 32Gb
-// OopEncodingHeapMax == UnscaledOopHeapMax << LogMinObjAlignmentInBytes;
-
-char* Universe::preferred_heap_base(size_t heap_size, size_t alignment, NARROW_OOP_MODE mode) {
-  assert(is_size_aligned((size_t)OopEncodingHeapMax, alignment), "Must be");
-  assert(is_size_aligned((size_t)UnscaledOopHeapMax, alignment), "Must be");
-  assert(is_size_aligned(heap_size, alignment), "Must be");
-
-  uintx heap_base_min_address_aligned = align_size_up(HeapBaseMinAddress, alignment);
-
-  size_t base = 0;
-#ifdef _LP64
-  if (UseCompressedOops) {
-    assert(mode == UnscaledNarrowOop  ||
-           mode == ZeroBasedNarrowOop ||
-           mode == HeapBasedNarrowOop, "mode is invalid");
-    const size_t total_size = heap_size + heap_base_min_address_aligned;
-    // Return specified base for the first request.
-    if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
-      base = heap_base_min_address_aligned;
-
-    // If the total size is small enough to allow UnscaledNarrowOop then
-    // just use UnscaledNarrowOop.
-    } else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop)) {
-      if ((total_size <= UnscaledOopHeapMax) && (mode == UnscaledNarrowOop) &&
-          (Universe::narrow_oop_shift() == 0)) {
-        // Use 32-bits oops without encoding and
-        // place heap's top on the 4Gb boundary
-        base = (UnscaledOopHeapMax - heap_size);
-      } else {
-        // Can't reserve with NarrowOopShift == 0
-        Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
-
-        if (mode == UnscaledNarrowOop ||
-            mode == ZeroBasedNarrowOop && total_size <= UnscaledOopHeapMax) {
-
-          // Use zero based compressed oops with encoding and
-          // place heap's top on the 32Gb boundary in case
-          // total_size > 4Gb or failed to reserve below 4Gb.
-          uint64_t heap_top = OopEncodingHeapMax;
-
-          // For small heaps, save some space for compressed class pointer
-          // space so it can be decoded with no base.
-          if (UseCompressedClassPointers && !UseSharedSpaces &&
-              OopEncodingHeapMax <= 32*G) {
-
-            uint64_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
-            assert(is_size_aligned((size_t)OopEncodingHeapMax-class_space,
-                   alignment), "difference must be aligned too");
-            uint64_t new_top = OopEncodingHeapMax-class_space;
-
-            if (total_size <= new_top) {
-              heap_top = new_top;
-            }
-          }
-
-          // Align base to the adjusted top of the heap
-          base = heap_top - heap_size;
-        }
-      }
-    } else {
-      // UnscaledNarrowOop encoding didn't work, and no base was found for ZeroBasedOops or
-      // HeapBasedNarrowOop encoding was requested.  So, can't reserve below 32Gb.
-      Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
-    }
-
-    // Set narrow_oop_base and narrow_oop_use_implicit_null_checks
-    // used in ReservedHeapSpace() constructors.
-    // The final values will be set in initialize_heap() below.
-    if ((base != 0) && ((base + heap_size) <= OopEncodingHeapMax)) {
-      // Use zero based compressed oops
-      Universe::set_narrow_oop_base(NULL);
-      // Don't need guard page for implicit checks in indexed
-      // addressing mode with zero based Compressed Oops.
-      Universe::set_narrow_oop_use_implicit_null_checks(true);
-    } else {
-      // Set to a non-NULL value so the ReservedSpace ctor computes
-      // the correct no-access prefix.
-      // The final value will be set in initialize_heap() below.
-      Universe::set_narrow_oop_base((address)UnscaledOopHeapMax);
-#if defined(_WIN64) || defined(AIX)
-      if (UseLargePages) {
-        // Cannot allocate guard pages for implicit checks in indexed
-        // addressing mode when large pages are specified on windows.
-        Universe::set_narrow_oop_use_implicit_null_checks(false);
-      }
-#endif //  _WIN64
-    }
-  }
-#endif
-
-  assert(is_ptr_aligned((char*)base, alignment), "Must be");
-  return (char*)base; // also return NULL (don't care) for 32-bit VM
-}
-
 jint Universe::initialize_heap() {
 
   if (UseParallelGC) {
 #if INCLUDE_ALL_GCS
     Universe::_collectedHeap = new ParallelScavengeHeap();

@@ -839,34 +742,17 @@
     // This also makes implicit null checking work, because the
     // memory+1 page below heap_base needs to cause a signal.
     // See needs_explicit_null_check.
     // Only set the heap base for compressed oops because it indicates
     // compressed oops for pstack code.
-    if (((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax)) {
-      // Can't reserve heap below 32Gb.
-      // keep the Universe::narrow_oop_base() set in Universe::reserve_heap()
-      Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
-#ifdef AIX
-      // There is no protected page before the heap. This assures all oops
-      // are decoded so that NULL is preserved, so this page will not be accessed.
-      Universe::set_narrow_oop_use_implicit_null_checks(false);
-#endif
-    } else {
-      Universe::set_narrow_oop_base(0);
-#ifdef _WIN64
-      if (!Universe::narrow_oop_use_implicit_null_checks()) {
-        // Don't need guard page for implicit checks in indexed addressing
-        // mode with zero based Compressed Oops.
-        Universe::set_narrow_oop_use_implicit_null_checks(true);
-      }
-#endif //  _WIN64
-      if((uint64_t)Universe::heap()->reserved_region().end() > UnscaledOopHeapMax) {
-        // Can't reserve heap below 4Gb.
+    if ((uint64_t)Universe::heap()->reserved_region().end() > UnscaledOopHeapMax) {
+      // Didn't reserve heap below 4Gb. Must shift.
         Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
-      } else {
-        Universe::set_narrow_oop_shift(0);
       }
+    if ((uint64_t)Universe::heap()->reserved_region().end() <= OopEncodingHeapMax) {
+      // Did reserve heap below 32Gb. Can use base == 0.
+      Universe::set_narrow_oop_base(0);
     }
 
     Universe::set_narrow_ptrs_base(Universe::narrow_oop_base());
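
The two checks above amount to the following mode selection, keyed off where
the reserved heap ends. A sketch under the same assumptions as before (8-byte
alignment, so UnscaledOopHeapMax = 4Gb and OopEncodingHeapMax = 32Gb); note
that with this change a non-zero base may additionally qualify as a disjoint
base, see narrow_oop_mode() further down.

    #include <stdint.h>

    static const char* mode_for_heap_end(uint64_t heap_end) {
      const uint64_t k4G  = UINT64_C(1) << 32;  // UnscaledOopHeapMax
      const uint64_t k32G = UINT64_C(1) << 35;  // OopEncodingHeapMax
      if (heap_end <= k4G)  return "32-bit";      // base == 0, shift == 0
      if (heap_end <= k32G) return "Zero based";  // base == 0, shift == 3
      return "Non-zero based";                    // base != 0, shift == 3
    }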
 
     if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {

@@ -898,75 +784,208 @@
               Universe::heap()->base(), Universe::heap()->reserved_region().byte_size()/M);
 
   tty->print(", Compressed Oops mode: %s", narrow_oop_mode_to_string(narrow_oop_mode()));
 
   if (Universe::narrow_oop_base() != 0) {
-    tty->print(":" PTR_FORMAT, Universe::narrow_oop_base());
+    tty->print(": " PTR_FORMAT, Universe::narrow_oop_base());
   }
 
   if (Universe::narrow_oop_shift() != 0) {
     tty->print(", Oop shift amount: %d", Universe::narrow_oop_shift());
   }
 
+  if (!Universe::narrow_oop_use_implicit_null_checks()) {
+    tty->print(", no protected page in front of the heap");
+  }
+
   tty->cr();
   tty->cr();
 }
 
-// Reserve the Java heap, which is now the same for all GCs.
+#define SIZE_64K  ((uint64_t)       0x10000ULL)
+#define SIZE_256M ((uint64_t)    0x10000000ULL)
+#define SIZE_32G  ((uint64_t)   0x800000000ULL)
+
+// Helper for heap allocation. Returns a NULL-terminated array of
+// OS-specific addresses that are suitable for disjoint base mode.
+static char** get_attach_addresses_for_disjoint_mode() {
+  static uintptr_t addresses[] = {
+#ifdef _LP64
+     2 * SIZE_32G,
+     3 * SIZE_32G,
+     4 * SIZE_32G,
+     8 * SIZE_32G,
+    10 * SIZE_32G,
+     1 * SIZE_64K * SIZE_32G,
+     2 * SIZE_64K * SIZE_32G,
+     3 * SIZE_64K * SIZE_32G,
+     4 * SIZE_64K * SIZE_32G,
+    16 * SIZE_64K * SIZE_32G,
+    32 * SIZE_64K * SIZE_32G,
+    34 * SIZE_64K * SIZE_32G,
+#endif
+    0
+  };
+
+  // Skip addresses below OopEncodingHeapMax or HeapBaseMinAddress, which
+  // cannot serve as a disjoint base. This assumes the array is sorted in
+  // ascending order.
+  uint i = 0;
+  while (addresses[i] != 0 &&
+         (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
+    i++;
+  }
+  uint start = i;
+
+  // Truncate the array after at most HeapSearchSteps entries.
+  i = 0;
+  while (addresses[start+i] != 0) {
+    if (i == HeapSearchSteps) {
+      addresses[start+i] = 0;
+      break;
+    }
+    i++;
+  }
+
+  return (char**) &addresses[start];
+}
+
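
Why these particular addresses: each is a multiple of 32Gb (OopEncodingHeapMax
at 8-byte alignment), so the base has no bits set in the range that a shifted
32-bit narrow oop occupies. A sketch of the property and of what it buys;
illustrative only, the VM's actual check is is_disjoint_heap_base_address(),
and 64-bit pointers are assumed.

    #include <stdint.h>

    // A base is "disjoint" from the shifted narrow oop if it has no bits
    // set below the 32Gb boundary, i.e. only bit 35 and above.
    static inline bool is_disjoint_base(uint64_t base) {
      const uint64_t k32G = UINT64_C(1) << 35;
      return base != 0 && (base & (k32G - 1)) == 0;
    }

    // The payoff: (base & (narrow << shift)) == 0 always holds, so the add
    // in the decode can become an OR, or the base can simply be materialized
    // into the high bits of the register holding the shifted oop.
    static inline uint64_t decode_disjoint(uint32_t narrow, uint64_t base) {
      return base | ((uint64_t)narrow << 3);
    }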
 ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
+
   assert(alignment <= Arguments::conservative_max_heap_alignment(),
       err_msg("actual alignment "SIZE_FORMAT" must be within maximum heap alignment "SIZE_FORMAT,
           alignment, Arguments::conservative_max_heap_alignment()));
+
   size_t total_reserved = align_size_up(heap_size, alignment);
   assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
       "heap size is too big for compressed oops");
 
   bool use_large_pages = UseLargePages && is_size_aligned(alignment, os::large_page_size());
   assert(!UseLargePages
       || UseParallelGC
       || use_large_pages, "Wrong alignment to use large pages");
 
-  char* addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::UnscaledNarrowOop);
-
-  ReservedHeapSpace total_rs(total_reserved, alignment, use_large_pages, addr);
+  // Address where to allocate the heap. NULL: anywhere.
+  char* addr = NULL;
+  size_t disjoint_noaccess_prefix = 0;
 
   if (UseCompressedOops) {
-    if (addr != NULL && !total_rs.is_reserved()) {
-      // Failed to reserve at specified address - the requested memory
-      // region is taken already, for example, by 'java' launcher.
-      // Try again to reserver heap higher.
-      addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::ZeroBasedNarrowOop);
-
-      ReservedHeapSpace total_rs0(total_reserved, alignment,
-          use_large_pages, addr);
-
-      if (addr != NULL && !total_rs0.is_reserved()) {
-        // Failed to reserve at specified address again - give up.
-        addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::HeapBasedNarrowOop);
-        assert(addr == NULL, "");
+    // Try to get a heap by:
+    //  0) if HeapBaseMinAddress is set, try this address first.
+    //  1) get heap for unscaled (base = 0, shift = 0)
+    //  2) failing that, get heap for zerobased (base = 0, shift != 0)
+    //  3) failing that, get heap for disjointbase  (base != 0, shift != 0)
+    //  4) failing that, any heap will do.
+
+    // Loop over the compressed oop modes; try to obtain a fitting memory range;
+    // on success, release it again and let ReservedHeapSpace attempt to
+    // allocate in the same range.
+    for (int i = 0; i <= 4; i++) {
+      disjoint_noaccess_prefix = 0;
+      switch (i) {
+      case 0:
+        // Attempt to allocate at the user-given address.
+        if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
+          addr = os::attempt_reserve_memory_at(total_reserved, (char *)HeapBaseMinAddress);
+          if (is_disjoint_heap_base_address((address)addr)) {
+            disjoint_noaccess_prefix = ReservedHeapSpace::noaccess_prefix_size(alignment);
+          }
+        }
+        break;
+      case 1:
+        // Attempt to allocate for unscaled (zero base, zero shift).
+        addr = os::attempt_reserve_memory_in_range(total_reserved, alignment,
+                                                   (char*) HeapBaseMinAddress,
+                                                   (char*) UnscaledOopHeapMax,
+                                                   HeapSearchSteps);
+        break;
+      case 2:
+        {
+          // zerobased: Attempt to allocate in the lower 32G.
+          // But leave room for the compressed class pointers.
+          char* zerobased_max = (char*)OopEncodingHeapMax;
 
-        ReservedHeapSpace total_rs1(total_reserved, alignment,
-            use_large_pages, addr);
-        total_rs = total_rs1;
-      } else {
-        total_rs = total_rs0;
+          // For small heaps, save some space for compressed class pointer
+          // space so it can be decoded with no base.
+          if (UseCompressedClassPointers && !UseSharedSpaces &&
+              OopEncodingHeapMax <= 32*G) {
+            uint64_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
+            assert(is_size_aligned((size_t)OopEncodingHeapMax-class_space,
+                   alignment), "difference must be aligned too");
+            zerobased_max = (char*) OopEncodingHeapMax - class_space;
       }
+
+          addr = os::attempt_reserve_memory_in_range(total_reserved, alignment,
+                                                     (char*) MAX2((char*)UnscaledOopHeapMax, (char*)HeapBaseMinAddress),
+                                                     (char*) zerobased_max,
+                                                     HeapSearchSteps);
+        }
+        break;
+      case 3:
+        // disjointbase: just try a handful of known, OS-dependent addresses
+        // that are friendly to disjoint base mode.
+        {
+          char** addresses = get_attach_addresses_for_disjoint_mode();
+          addr = os::attempt_reserve_memory_at_multiple(total_reserved, addresses);
+          disjoint_noaccess_prefix = ReservedHeapSpace::noaccess_prefix_size(alignment);
+        }
+        break;
+      case 4:
+        addr = 0;
+        break;
+      default:
+        ShouldNotReachHere();
     }
+
+      // If we could not find space for the current mode, try the next mode.
+      if (!addr && i < 4) {
+        continue;
   }
 
-  if (!total_rs.is_reserved()) {
-    vm_exit_during_initialization(err_msg("Could not reserve enough space for " SIZE_FORMAT "KB object heap", total_reserved/K));
-    return total_rs;
+      // If we did find space, release it again; ReservedHeapSpace will
+      // allocate in the same range below.
+      if (addr) {
+        os::release_memory(addr, total_reserved);
+        break;  // Quit the for loop.
+      }
+
+    } // for loop
+  }
+
+  // Now create the space.
+  ReservedHeapSpace total_rs(total_reserved, alignment,
+                             use_large_pages, addr + disjoint_noaccess_prefix);
+
+  if (addr != NULL && !total_rs.is_reserved()) {
+    // Try an arbitrary position.
+    ReservedHeapSpace total_rs1(total_reserved, alignment, use_large_pages, NULL);
+    disjoint_noaccess_prefix = 0;
+    total_rs = total_rs1;
   }
 
+  if (total_rs.is_reserved()) {
+    // We are good.
+
   if (UseCompressedOops) {
     // Universe::initialize_heap() will reset this to NULL if unscaled
     // or zero-based narrow oops are actually used.
-    address base = (address)(total_rs.base() - os::vm_page_size());
+      // Else heap start and base MUST differ, so that NULL can be encoded
+      // unambiguously.
+      address base = (address)(total_rs.base() - ReservedHeapSpace::noaccess_prefix_size(alignment));
     Universe::set_narrow_oop_base(base);
   }
+
   return total_rs;
+  }
+
+  vm_exit_during_initialization(
+    err_msg("Could not reserve enough space for " SIZE_FORMAT "KB object heap",
+            total_reserved/K));
+
+  // Satisfy the compiler.
+  ShouldNotReachHere();
+  return ReservedHeapSpace(0, 0, false, 0);
 }
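
On the base adjustment above: if the base equalled the heap start, the first
object in the heap would encode to narrow oop 0 and be indistinguishable from
NULL. Keeping the base one no-access prefix below the heap start reserves the
narrow value 0 for NULL alone. A sketch, illustrative rather than the VM's
actual encode path:

    #include <stdint.h>

    // heap_start == base + prefix with prefix > 0, so the lowest real object
    // encodes to prefix >> shift != 0. Decoding narrow 0 without a null check
    // yields 'base', which points into the protected prefix page, so a field
    // access through it traps and is turned into a NullPointerException.
    static inline uint32_t encode_oop(uint64_t oop, uint64_t base, int shift) {
      return (oop == 0) ? 0 : (uint32_t)((oop - base) >> shift);
    }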
 
 
 // It's the caller's responsibility to ensure glitch-freedom
 // (if required).

@@ -980,20 +999,26 @@
   switch (mode) {
     case UnscaledNarrowOop:
       return "32-bit";
     case ZeroBasedNarrowOop:
       return "Zero based";
+    case DisjointBaseNarrowOop:
+      return "Non-zero disjoint base";
     case HeapBasedNarrowOop:
       return "Non-zero based";
   }
 
   ShouldNotReachHere();
   return "";
 }
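
Note on the classification below: a disjoint base is in particular a non-zero
base, so narrow_oop_mode() must test for disjointness before the generic
heap-based case. Restated as a sketch with illustrative names:

    enum Mode { Unscaled, ZeroBased, DisjointBase, HeapBased };

    // Illustrative restatement of the ladder in narrow_oop_mode().
    static Mode classify(bool base_disjoint, bool base_nonzero, bool shifted) {
      if (base_disjoint) return DisjointBase;  // must come first
      if (base_nonzero)  return HeapBased;
      if (shifted)       return ZeroBased;
      return Unscaled;
    }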
 
 
 Universe::NARROW_OOP_MODE Universe::narrow_oop_mode() {
+  if (narrow_oop_base_disjoint()) {
+    return DisjointBaseNarrowOop;
+  }
+
   if (narrow_oop_base() != 0) {
     return HeapBasedNarrowOop;
   }
 
   if (narrow_oop_shift() != 0) {