/*
 * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/gcLogPrecious.hpp"
#include "gc/z/zAddress.hpp"
#include "gc/z/zAddressSpaceLimit.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zVirtualMemory.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

ZVirtualMemoryManager::ZVirtualMemoryManager(size_t max_capacity) :
    _manager(),
    _initialized(false) {

  // Check max supported heap size
  if (max_capacity > ZAddressOffsetMax) {
    log_error_p(gc)("Java heap too large (max supported heap size is " SIZE_FORMAT "G)",
                    ZAddressOffsetMax / G);
    return;
  }

  // Reserve address space
  if (!reserve(max_capacity)) {
    // Retry with a reduced number of address bits
    log_info_p(gc, init)("Reservation with " SIZE_FORMAT " address bits failed, re-configuring ZAddressBits to " SIZE_FORMAT,
                         ZAddressBits, _address_bits_seen);
    ZAddressBits = _address_bits_seen;
    ZAddress::initialize();
    if (!reserve(max_capacity)) {
      log_error_pd(gc)("Failed to reserve enough address space for Java heap");
      return;
    }
  }

  // Initialize OS specific parts
  initialize_os();

  // Successfully initialized
  _initialized = true;
}

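// Try to reserve the range [start, start + size). If the platform reservation
// fails, split the range into two roughly equal, granule-aligned halves and
// recurse into each, giving up on ranges smaller than min_range. Returns the
// number of bytes actually reserved.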
size_t ZVirtualMemoryManager::reserve_discontiguous(uintptr_t start, size_t size, size_t min_range) {
  if (size < min_range) {
    // Too small
    return 0;
  }

  assert(is_aligned(size, ZGranuleSize), "Misaligned");

  if (reserve_contiguous_platform(start, size)) {
    // Make the address range free
    _manager.free(start, size);
    return size;
  }

  const size_t half = size / 2;
  if (half < min_range) {
    // Too small
    return 0;
  }

  // Divide and conquer
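  // Align the split point down to the granule size so both halves stay granule-aligned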
  const size_t first_part = align_down(half, ZGranuleSize);
  const size_t second_part = size - first_part;
  return reserve_discontiguous(start, first_part, min_range) +
         reserve_discontiguous(start + first_part, second_part, min_range);
}

size_t ZVirtualMemoryManager::reserve_discontiguous(size_t size) {
  // Don't try to reserve address ranges smaller than 1% of the requested size.
  // This avoids an explosion of reservation attempts in case large parts of the
  // address space are already occupied.
  const size_t min_range = align_up(size / 100, ZGranuleSize);
  size_t start = 0;
  size_t reserved = 0;

  // Reserve size somewhere between [0, ZAddressOffsetMax)
  while (reserved < size && start < ZAddressOffsetMax) {
    const size_t remaining = MIN2(size - reserved, ZAddressOffsetMax - start);
    reserved += reserve_discontiguous(start, remaining, min_range);
    start += remaining;
  }

  return reserved;
}

bool ZVirtualMemoryManager::reserve_contiguous(size_t size) {
  // Allow at most 8192 attempts spread evenly across [0, ZAddressOffsetMax)
  const size_t end = ZAddressOffsetMax - size;
  const size_t increment = align_up(end / 8192, ZGranuleSize);

  for (size_t start = 0; start <= end; start += increment) {
    if (reserve_contiguous_platform(start, size)) {
      // Make the address range free
      _manager.free(start, size);

      // Success
      return true;
    }
  }

  // Failed
  return false;
}

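// Reserve address space for the heap. The requested amount is a multiple
// (ZVirtualToPhysicalRatio) of the maximum heap size, capped by the address
// space limit, which gives the allocator headroom to deal with address space
// fragmentation. A contiguous range is preferred, with a discontiguous set of
// ranges as fallback. Returns true if at least max_capacity bytes of address
// space were reserved.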
bool ZVirtualMemoryManager::reserve(size_t max_capacity) {
  const size_t limit = MIN2(ZAddressOffsetMax, ZAddressSpaceLimit::heap_view());
  const size_t size = MIN2(max_capacity * ZVirtualToPhysicalRatio, limit);

  size_t reserved = size;
  bool contiguous = true;

  // Prefer a contiguous address space
  if (!reserve_contiguous(size)) {
    // Fall back to a discontiguous address space
    reserved = reserve_discontiguous(size);
    contiguous = false;
  }

  log_info_p(gc, init)("Address Space Type: %s/%s/%s",
                       (contiguous ? "Contiguous" : "Discontiguous"),
                       (limit == ZAddressOffsetMax ? "Unrestricted" : "Restricted"),
                       (reserved == size ? "Complete" : "Degraded"));
  log_info_p(gc, init)("Address Space Size: " SIZE_FORMAT "M x " SIZE_FORMAT " = " SIZE_FORMAT "M",
                       reserved / M, ZHeapViews, (reserved * ZHeapViews) / M);

  return reserved >= max_capacity;
}

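// Register the reserved range with Native Memory Tracking (NMT) and tag it as Java heap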
void ZVirtualMemoryManager::nmt_reserve(uintptr_t start, size_t size) {
  MemTracker::record_virtual_memory_reserve((void*)start, size, CALLER_PC);
  MemTracker::record_virtual_memory_type((void*)start, mtJavaHeap);
}

bool ZVirtualMemoryManager::is_initialized() const {
  return _initialized;
}

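// Hand out a size-byte range from the reserved address space. Small pages (and
// allocations forced to a low address) are taken from the low end of the range,
// medium/large pages from the high end, which helps keep the two size classes
// from interleaving in the address space.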
ZVirtualMemory ZVirtualMemoryManager::alloc(size_t size, bool force_low_address) {
  uintptr_t start;

  // Small pages are allocated at low addresses, while medium/large pages
  // are allocated at high addresses (unless forced to be at a low address).
  if (force_low_address || size <= ZPageSizeSmall) {
    start = _manager.alloc_from_front(size);
  } else {
    start = _manager.alloc_from_back(size);
  }

  return ZVirtualMemory(start, size);
}

void ZVirtualMemoryManager::free(const ZVirtualMemory& vmem) {
  _manager.free(vmem.start(), vmem.size());
}