/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_ALLOCATION_INLINE_HPP
#define SHARE_VM_MEMORY_ALLOCATION_INLINE_HPP

#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"

// Explicit C-heap memory management

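// Debug-only tracing hooks; called from ASSERT builds below when
// PrintMallocFree is set.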
void trace_heap_malloc(size_t size, const char* name, void *p);
void trace_heap_free(void *p);

#ifndef PRODUCT
// Increments unsigned long value for statistics (not atomic on MP).
inline void inc_stat_counter(volatile julong* dest, julong add_value) {
#if defined(SPARC) || defined(X86)
  // Sparc and X86 have atomic jlong (8 bytes) instructions
  julong value = Atomic::load((volatile jlong*)dest);
  value += add_value;
  Atomic::store((jlong)value, (volatile jlong*)dest);
#else
  // possible word-tearing during load/store
  *dest += add_value;
#endif
}
#endif

// Allocate using malloc; with the default EXIT_OOM failure mode the VM
// exits if no memory is available, with RETURN_NULL it returns NULL.
inline char* AllocateHeap(size_t size, MEMFLAGS flags,
    const NativeCallStack& stack,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
  char* p = (char*) os::malloc(size, flags, stack);
  #ifdef ASSERT
  if (PrintMallocFree) trace_heap_malloc(size, "AllocateHeap", p);
  #endif
  if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
    vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "AllocateHeap");
  }
  return p;
}

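// Convenience overload that passes CURRENT_PC as the call-stack argument,
// recording the allocation site for native memory tracking.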
ALWAYSINLINE char* AllocateHeap(size_t size, MEMFLAGS flags,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
  return AllocateHeap(size, flags, CURRENT_PC, alloc_failmode);
}

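// Reallocate using realloc; failure handling follows alloc_failmode, as
// with AllocateHeap above.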
ALWAYSINLINE char* ReallocateHeap(char *old, size_t size, MEMFLAGS flag,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
  char* p = (char*) os::realloc(old, size, flag, CURRENT_PC);
  #ifdef ASSERT
  if (PrintMallocFree) trace_heap_malloc(size, "ReallocateHeap", p);
  #endif
  if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
    vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "ReallocateHeap");
  }
  return p;
}

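// Release memory obtained through AllocateHeap or ReallocateHeap.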
inline void FreeHeap(void* p) {
  #ifdef ASSERT
  if (PrintMallocFree) trace_heap_free(p);
  #endif
  os::free(p);
}

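// Allocation operators for CHeapObj<F>. All variants funnel into
// AllocateHeap/FreeHeap, so instances are tagged with the MEMFLAGS type F
// and are visible to native memory tracking. The nothrow variants return
// NULL on failure instead of exiting the VM; illustratively (Foo and
// mtInternal are placeholders, not part of this file):
//
//   class Foo : public CHeapObj<mtInternal> { ... };
//   Foo* f = new (std::nothrow) Foo();
//   if (f == NULL) { /* handle allocation failure */ }
//   delete f;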
template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size,
      const NativeCallStack& stack) throw() {
  void* p = (void*)AllocateHeap(size, F, stack);
#ifdef ASSERT
  if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
#endif
  return p;
}

template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size) throw() {
  return CHeapObj<F>::operator new(size, CALLER_PC);
}

template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size,
  const std::nothrow_t& nothrow_constant, const NativeCallStack& stack) throw() {
  void* p = (void*)AllocateHeap(size, F, stack,
      AllocFailStrategy::RETURN_NULL);
#ifdef ASSERT
  if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
#endif
  return p;
}

template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size,
  const std::nothrow_t& nothrow_constant) throw() {
  return CHeapObj<F>::operator new(size, nothrow_constant, CALLER_PC);
}

template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
      const NativeCallStack& stack) throw() {
  return CHeapObj<F>::operator new(size, stack);
}

template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size)
  throw() {
  return CHeapObj<F>::operator new(size, CALLER_PC);
}

template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
  const std::nothrow_t& nothrow_constant, const NativeCallStack& stack) throw() {
  return CHeapObj<F>::operator new(size, nothrow_constant, stack);
}

template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
  const std::nothrow_t& nothrow_constant) throw() {
  return CHeapObj<F>::operator new(size, nothrow_constant, CALLER_PC);
}

template <MEMFLAGS F> void CHeapObj<F>::operator delete(void* p) {
  FreeHeap(p);
}

template <MEMFLAGS F> void CHeapObj<F>::operator delete [](void* p) {
  FreeHeap(p);
}

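// MmapArrayAllocator allocates array backing storage straight from virtual
// memory (reserve + commit), rounding the requested byte size up to the VM
// allocation granularity.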
template <class E>
size_t MmapArrayAllocator<E>::size_for(size_t length) {
  size_t size = length * sizeof(E);
  int alignment = os::vm_allocation_granularity();
  return align_up(size, alignment);
}

template <class E>
E* MmapArrayAllocator<E>::allocate_or_null(size_t length, MEMFLAGS flags) {
  size_t size = size_for(length);
  int alignment = os::vm_allocation_granularity();

  char* addr = os::reserve_memory(size, NULL, alignment, flags);
  if (addr == NULL) {
    return NULL;
  }

  if (os::commit_memory(addr, size, !ExecMem)) {
    return (E*)addr;
  } else {
    os::release_memory(addr, size);
    return NULL;
  }
}

template <class E>
E* MmapArrayAllocator<E>::allocate(size_t length, MEMFLAGS flags) {
  size_t size = size_for(length);
  int alignment = os::vm_allocation_granularity();

  char* addr = os::reserve_memory(size, NULL, alignment, flags);
  if (addr == NULL) {
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "Allocator (reserve)");
  }

  os::commit_memory_or_exit(addr, size, !ExecMem, "Allocator (commit)");

  return (E*)addr;
}

template <class E>
void MmapArrayAllocator<E>::free(E* addr, size_t length) {
  bool result = os::release_memory((char*)addr, size_for(length));
  assert(result, "Failed to release memory");
}

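// MallocArrayAllocator is the C-heap counterpart: a thin wrapper around
// AllocateHeap/FreeHeap with no extra rounding.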
template <class E>
size_t MallocArrayAllocator<E>::size_for(size_t length) {
  return length * sizeof(E);
}

template <class E>
E* MallocArrayAllocator<E>::allocate(size_t length, MEMFLAGS flags) {
  return (E*)AllocateHeap(size_for(length), flags);
}

template <class E>
void MallocArrayAllocator<E>::free(E* addr, size_t /*length*/) {
  FreeHeap(addr);
}

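// ArrayAllocator dispatches between the two backends: arrays smaller than
// ArrayAllocatorMallocLimit bytes come from the C heap, larger ones from
// mmap'ed virtual memory.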
template <class E>
bool ArrayAllocator<E>::should_use_malloc(size_t length) {
  return MallocArrayAllocator<E>::size_for(length) < ArrayAllocatorMallocLimit;
}

template <class E>
E* ArrayAllocator<E>::allocate_malloc(size_t length, MEMFLAGS flags) {
  return MallocArrayAllocator<E>::allocate(length, flags);
}

template <class E>
E* ArrayAllocator<E>::allocate_mmap(size_t length, MEMFLAGS flags) {
  return MmapArrayAllocator<E>::allocate(length, flags);
}

template <class E>
E* ArrayAllocator<E>::allocate(size_t length, MEMFLAGS flags) {
  if (should_use_malloc(length)) {
    return allocate_malloc(length, flags);
  }

  return allocate_mmap(length, flags);
}

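// Reallocation is allocate-copy-free rather than realloc, since growing or
// shrinking across ArrayAllocatorMallocLimit can switch the backing store
// between malloc and mmap.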
template <class E>
E* ArrayAllocator<E>::reallocate(E* old_addr, size_t old_length, size_t new_length, MEMFLAGS flags) {
  E* new_addr = (new_length > 0)
      ? allocate(new_length, flags)
      : NULL;

  if (new_addr != NULL && old_addr != NULL) {
    memcpy(new_addr, old_addr, MIN2(old_length, new_length) * sizeof(E));
  }

  if (old_addr != NULL) {
    free(old_addr, old_length);
  }

  return new_addr;
}

template <class E>
void ArrayAllocator<E>::free_malloc(E* addr, size_t length) {
  MallocArrayAllocator<E>::free(addr, length);
}

template <class E>
void ArrayAllocator<E>::free_mmap(E* addr, size_t length) {
  MmapArrayAllocator<E>::free(addr, length);
}

template <class E>
void ArrayAllocator<E>::free(E* addr, size_t length) {
  if (addr != NULL) {
    if (should_use_malloc(length)) {
      free_malloc(addr, length);
    } else {
      free_mmap(addr, length);
    }
  }
}
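
// Illustrative use (buf and mtInternal below are just an example, not part
// of this file):
//
//   size_t len = 1024 * 1024;
//   jbyte* buf = ArrayAllocator<jbyte>::allocate(len, mtInternal);
//   ...
//   ArrayAllocator<jbyte>::free(buf, len);
//
// Callers must pass the same length to free() that they allocated with, so
// that the malloc-vs-mmap decision is recomputed consistently.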

#endif // SHARE_VM_MEMORY_ALLOCATION_INLINE_HPP