/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_MEMORY_ALLOCATION_INLINE_HPP
#define SHARE_MEMORY_ALLOCATION_INLINE_HPP

#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
// Explicit C-heap memory management

#ifndef PRODUCT
// Increments an unsigned long statistics counter (not atomic on MP).
inline void inc_stat_counter(volatile julong* dest, julong add_value) {
#if defined(SPARC) || defined(X86)
  // SPARC and X86 have atomic 8-byte (jlong) load/store instructions.
  julong value = Atomic::load(dest);
  value += add_value;
  Atomic::store(value, dest);
#else
  // Possible word tearing during load/store.
  *dest += add_value;
#endif
}
#endif

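// MmapArrayAllocator allocates array backing storage straight from the
// OS: it reserves and then commits virtual memory, bypassing the C heap.
// Requested sizes are rounded up to os::vm_allocation_granularity().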
template <class E>
size_t MmapArrayAllocator<E>::size_for(size_t length) {
  size_t size = length * sizeof(E);
  int alignment = os::vm_allocation_granularity();
  return align_up(size, alignment);
}

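// Non-fatal variant: returns NULL if either the reservation or the
// commit fails, releasing the reservation again in the latter case.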
template <class E>
E* MmapArrayAllocator<E>::allocate_or_null(size_t length, MEMFLAGS flags) {
  size_t size = size_for(length);
  int alignment = os::vm_allocation_granularity();

  char* addr = os::reserve_memory(size, NULL, alignment, flags);
  if (addr == NULL) {
    return NULL;
  }

  if (os::commit_memory(addr, size, !ExecMem)) {
    return (E*)addr;
  } else {
    os::release_memory(addr, size);
    return NULL;
  }
}

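// Fatal variant: a reservation or commit failure terminates the VM with
// an out-of-memory error instead of returning NULL.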
template <class E>
E* MmapArrayAllocator<E>::allocate(size_t length, MEMFLAGS flags) {
  size_t size = size_for(length);
  int alignment = os::vm_allocation_granularity();

  char* addr = os::reserve_memory(size, NULL, alignment, flags);
  if (addr == NULL) {
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "Allocator (reserve)");
  }

  os::commit_memory_or_exit(addr, size, !ExecMem, "Allocator (commit)");

  return (E*)addr;
}

template <class E>
void MmapArrayAllocator<E>::free(E* addr, size_t length) {
  bool result = os::release_memory((char*)addr, size_for(length));
  assert(result, "Failed to release memory");
}

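// MallocArrayAllocator is a thin wrapper around the C-heap primitives
// AllocateHeap/FreeHeap. Unlike the mmap path, the requested size is
// used as-is, with no rounding to the allocation granularity.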
template <class E>
size_t MallocArrayAllocator<E>::size_for(size_t length) {
  return length * sizeof(E);
}

template <class E>
E* MallocArrayAllocator<E>::allocate(size_t length, MEMFLAGS flags) {
  return (E*)AllocateHeap(size_for(length), flags);
}

template <class E>
void MallocArrayAllocator<E>::free(E* addr) {
  FreeHeap(addr);
}

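// ArrayAllocator picks a backing strategy per request: arrays smaller
// than ArrayAllocatorMallocLimit come from the C heap, larger ones from
// mmap'ed virtual memory. free() re-derives the strategy from the
// length, so callers must pass the same length they allocated with.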
template <class E>
bool ArrayAllocator<E>::should_use_malloc(size_t length) {
  return MallocArrayAllocator<E>::size_for(length) < ArrayAllocatorMallocLimit;
}

template <class E>
E* ArrayAllocator<E>::allocate_malloc(size_t length, MEMFLAGS flags) {
  return MallocArrayAllocator<E>::allocate(length, flags);
}

template <class E>
E* ArrayAllocator<E>::allocate_mmap(size_t length, MEMFLAGS flags) {
  return MmapArrayAllocator<E>::allocate(length, flags);
}

template <class E>
E* ArrayAllocator<E>::allocate(size_t length, MEMFLAGS flags) {
  if (should_use_malloc(length)) {
    return allocate_malloc(length, flags);
  }

  return allocate_mmap(length, flags);
}

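// Reallocation is allocate-copy-free: grab a new block (possibly with a
// different backing strategy than the old one), copy the surviving
// elements, then release the old block. A new_length of 0 degenerates
// to a plain free() that returns NULL.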
template <class E>
E* ArrayAllocator<E>::reallocate(E* old_addr, size_t old_length, size_t new_length, MEMFLAGS flags) {
  E* new_addr = (new_length > 0)
      ? allocate(new_length, flags)
      : NULL;

  if (new_addr != NULL && old_addr != NULL) {
    memcpy(new_addr, old_addr, MIN2(old_length, new_length) * sizeof(E));
  }

  if (old_addr != NULL) {
    free(old_addr, old_length);
  }

  return new_addr;
}

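// The free_* helpers forward to the matching backing allocator. Only the
// malloc path can ignore the length, since FreeHeap releases the whole
// C-heap block by address alone.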
template <class E>
void ArrayAllocator<E>::free_malloc(E* addr, size_t length) {
  MallocArrayAllocator<E>::free(addr);
}

template <class E>
void ArrayAllocator<E>::free_mmap(E* addr, size_t length) {
  MmapArrayAllocator<E>::free(addr, length);
}

template <class E>
void ArrayAllocator<E>::free(E* addr, size_t length) {
  if (addr != NULL) {
    if (should_use_malloc(length)) {
      free_malloc(addr, length);
    } else {
      free_mmap(addr, length);
    }
  }
}
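
// A minimal usage sketch (illustrative only, not from this file;
// mtInternal stands in for whatever MEMFLAGS category a caller uses):
//
//   jint* data = ArrayAllocator<jint>::allocate(length, mtInternal);
//   ...
//   data = ArrayAllocator<jint>::reallocate(data, length, new_length, mtInternal);
//   ...
//   ArrayAllocator<jint>::free(data, new_length);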

#endif // SHARE_MEMORY_ALLOCATION_INLINE_HPP