/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_ALLOCATION_INLINE_HPP
#define SHARE_VM_MEMORY_ALLOCATION_INLINE_HPP

#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"

// Explicit C-heap memory management

#ifndef PRODUCT
// Increments a julong (unsigned 64-bit) value for statistics (not atomic on MP).
inline void inc_stat_counter(volatile julong* dest, julong add_value) {
#if defined(SPARC) || defined(X86)
  // SPARC and X86 have atomic jlong (8-byte) load/store instructions.
  julong value = Atomic::load(dest);
  value += add_value;
  Atomic::store(value, dest);
#else
  // Possible word-tearing during load/store.
  *dest += add_value;
#endif
}
#endif

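// Usage sketch (illustrative, not part of this header): statistics counters
// like this are typically bumped only in non-product builds; the counter name
// below is hypothetical.
//
//   static volatile julong _chunk_alloc_count = 0;
//   ...
//   NOT_PRODUCT(inc_stat_counter(&_chunk_alloc_count, 1);)
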
// Allocate using os::malloc; by default the VM exits on failure, unless
// alloc_failmode is AllocFailStrategy::RETURN_NULL, in which case NULL is
// returned when no memory is available.
inline char* AllocateHeap(size_t size, MEMFLAGS flags,
    const NativeCallStack& stack,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
  char* p = (char*) os::malloc(size, flags, stack);
  if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
    vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "AllocateHeap");
  }
  return p;
}

ALWAYSINLINE char* AllocateHeap(size_t size, MEMFLAGS flags,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
  return AllocateHeap(size, flags, CURRENT_PC, alloc_failmode);
}

ALWAYSINLINE char* ReallocateHeap(char *old, size_t size, MEMFLAGS flag,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
  char* p = (char*) os::realloc(old, size, flag, CURRENT_PC);
  if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
    vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "ReallocateHeap");
  }
  return p;
}

inline void FreeHeap(void* p) {
  os::free(p);
}

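// Usage sketch (illustrative only): AllocateHeap/FreeHeap come in matched
// pairs tagged with an NMT category; the buffer and the mtInternal tag are
// this comment's assumptions.
//
//   char* buf = AllocateHeap(256, mtInternal);
//   buf = ReallocateHeap(buf, 512, mtInternal);
//   FreeHeap(buf);
//
//   // Callers that can recover from OOM pass RETURN_NULL and check:
//   char* opt = AllocateHeap(1*G, mtInternal, AllocFailStrategy::RETURN_NULL);
//   if (opt == NULL) { /* fall back to a smaller buffer */ }
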
// Class-specific operator new/delete for CHeapObj: all storage comes from the
// C heap via AllocateHeap/FreeHeap, tagged with MEMFLAGS F. The
// NativeCallStack variants record the allocation site for Native Memory
// Tracking; the plain variants capture the caller's PC.
template <MEMFLAGS F>
NOINLINE
void* CHeapObj<F>::operator new(size_t size, const NativeCallStack& stack) throw() {
  return (void*)AllocateHeap(size, F, stack);
}

template <MEMFLAGS F>
NOINLINE
void* CHeapObj<F>::operator new(size_t size) throw() {
  return CHeapObj<F>::operator new(size, CALLER_PC);
}

template <MEMFLAGS F>
NOINLINE
void* CHeapObj<F>::operator new(size_t size, const std::nothrow_t&,
                                const NativeCallStack& stack) throw() {
  return (void*)AllocateHeap(size, F, stack, AllocFailStrategy::RETURN_NULL);
}

template <MEMFLAGS F>
NOINLINE
void* CHeapObj<F>::operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
  return CHeapObj<F>::operator new(size, nothrow_constant, CALLER_PC);
}

template <MEMFLAGS F>
NOINLINE
void* CHeapObj<F>::operator new[](size_t size, const NativeCallStack& stack) throw() {
  return CHeapObj<F>::operator new(size, stack);
}

template <MEMFLAGS F>
NOINLINE
void* CHeapObj<F>::operator new[](size_t size) throw() {
  return CHeapObj<F>::operator new(size, CALLER_PC);
}

template <MEMFLAGS F>
NOINLINE
void* CHeapObj<F>::operator new[](size_t size, const std::nothrow_t& nothrow_constant,
                                  const NativeCallStack& stack) throw() {
  return CHeapObj<F>::operator new(size, nothrow_constant, stack);
}

template <MEMFLAGS F>
NOINLINE
void* CHeapObj<F>::operator new[](size_t size,
                                  const std::nothrow_t& nothrow_constant) throw() {
  return CHeapObj<F>::operator new(size, nothrow_constant, CALLER_PC);
}

template <MEMFLAGS F>
void CHeapObj<F>::operator delete(void* p) {
  FreeHeap(p);
}

template <MEMFLAGS F>
void CHeapObj<F>::operator delete[](void* p) {
  FreeHeap(p);
}

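// Usage sketch (illustrative only): a VM-internal class opts into C-heap
// storage and NMT tagging by deriving from CHeapObj; "ExampleEntry" and the
// mtInternal category are this comment's assumptions.
//
//   class ExampleEntry : public CHeapObj<mtInternal> {
//     int _id;
//    public:
//     ExampleEntry(int id) : _id(id) {}
//   };
//
//   ExampleEntry* e = new ExampleEntry(1);                 // exits VM on OOM
//   ExampleEntry* n = new (std::nothrow) ExampleEntry(2);  // NULL on OOM
//   delete e;
//   delete n;
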
// MmapArrayAllocator backs arrays with reserved-and-committed virtual memory.
// Sizes are rounded up to the allocation granularity, so this is only
// economical for large arrays; lengths are element counts, not bytes.
template <class E>
size_t MmapArrayAllocator<E>::size_for(size_t length) {
  size_t size = length * sizeof(E);
  int alignment = os::vm_allocation_granularity();
  return align_up(size, alignment);
}

template <class E>
E* MmapArrayAllocator<E>::allocate_or_null(size_t length, MEMFLAGS flags) {
  size_t size = size_for(length);
  int alignment = os::vm_allocation_granularity();

  char* addr = os::reserve_memory(size, NULL, alignment, flags);
  if (addr == NULL) {
    return NULL;
  }

  if (os::commit_memory(addr, size, !ExecMem)) {
    return (E*)addr;
  } else {
    // Commit failed: undo the reservation and report failure to the caller.
    os::release_memory(addr, size);
    return NULL;
  }
}

template <class E>
E* MmapArrayAllocator<E>::allocate(size_t length, MEMFLAGS flags) {
  size_t size = size_for(length);
  int alignment = os::vm_allocation_granularity();

  char* addr = os::reserve_memory(size, NULL, alignment, flags);
  if (addr == NULL) {
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "Allocator (reserve)");
  }

  os::commit_memory_or_exit(addr, size, !ExecMem, "Allocator (commit)");

  return (E*)addr;
}

template <class E>
void MmapArrayAllocator<E>::free(E* addr, size_t length) {
  bool result = os::release_memory((char*)addr, size_for(length));
  assert(result, "Failed to release memory");
}

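// Usage sketch (illustrative only): allocate_or_null suits optional large
// buffers where the caller has a fallback; element type, length, and the
// mtGC tag are this comment's assumptions.
//
//   double* buf = MmapArrayAllocator<double>::allocate_or_null(1*M, mtGC);
//   if (buf != NULL) {
//     ...
//     MmapArrayAllocator<double>::free(buf, 1*M);
//   }
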
template <class E>
size_t MallocArrayAllocator<E>::size_for(size_t length) {
  return length * sizeof(E);
}

template <class E>
E* MallocArrayAllocator<E>::allocate(size_t length, MEMFLAGS flags) {
  return (E*)AllocateHeap(size_for(length), flags);
}

template <class E>
void MallocArrayAllocator<E>::free(E* addr, size_t /*length*/) {
  FreeHeap(addr);
}

// Arrays below ArrayAllocatorMallocLimit bytes are malloc'ed; larger ones are
// mmap'ed. free() re-derives this choice from the length, so callers must
// pass the same length they allocated with.
template <class E>
bool ArrayAllocator<E>::should_use_malloc(size_t length) {
  return MallocArrayAllocator<E>::size_for(length) < ArrayAllocatorMallocLimit;
}

template <class E>
E* ArrayAllocator<E>::allocate_malloc(size_t length, MEMFLAGS flags) {
  return MallocArrayAllocator<E>::allocate(length, flags);
}

template <class E>
E* ArrayAllocator<E>::allocate_mmap(size_t length, MEMFLAGS flags) {
  return MmapArrayAllocator<E>::allocate(length, flags);
}

template <class E>
E* ArrayAllocator<E>::allocate(size_t length, MEMFLAGS flags) {
  if (should_use_malloc(length)) {
    return allocate_malloc(length, flags);
  }

  return allocate_mmap(length, flags);
}

// Reallocation is allocate-copy-free: the backing storage may switch between
// malloc and mmap when the new size crosses ArrayAllocatorMallocLimit.
template <class E>
E* ArrayAllocator<E>::reallocate(E* old_addr, size_t old_length, size_t new_length, MEMFLAGS flags) {
  E* new_addr = (new_length > 0)
      ? allocate(new_length, flags)
      : NULL;

  if (new_addr != NULL && old_addr != NULL) {
    memcpy(new_addr, old_addr, MIN2(old_length, new_length) * sizeof(E));
  }

  if (old_addr != NULL) {
    free(old_addr, old_length);
  }

  return new_addr;
}

template <class E>
void ArrayAllocator<E>::free_malloc(E* addr, size_t length) {
  MallocArrayAllocator<E>::free(addr, length);
}

template <class E>
void ArrayAllocator<E>::free_mmap(E* addr, size_t length) {
  MmapArrayAllocator<E>::free(addr, length);
}

template <class E>
void ArrayAllocator<E>::free(E* addr, size_t length) {
  if (addr != NULL) {
    if (should_use_malloc(length)) {
      free_malloc(addr, length);
    } else {
      free_mmap(addr, length);
    }
  }
}

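// Usage sketch (illustrative only): callers treat ArrayAllocator as a single
// facade and let it pick the backing storage; element type, lengths, and the
// mtInternal tag are this comment's assumptions.
//
//   size_t len = 1024;
//   int* data = ArrayAllocator<int>::allocate(len, mtInternal);
//   ...
//   data = ArrayAllocator<int>::reallocate(data, len, 2 * len, mtInternal);
//   len *= 2;
//   ...
//   ArrayAllocator<int>::free(data, len);  // length must match the allocation
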
#endif // SHARE_VM_MEMORY_ALLOCATION_INLINE_HPP