/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zObjectAllocator.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zThread.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "logging/log.hpp"
#include "runtime/atomic.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
#include "runtime/threadSMR.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

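// Counters exposing the rate (operations/second) at which undo of object
// allocations made for relocation succeeds and fails, incremented in
// undo_alloc_object_for_relocation() below.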
static const ZStatCounter ZCounterUndoObjectAllocationSucceeded("Memory", "Undo Object Allocation Succeeded", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterUndoObjectAllocationFailed("Memory", "Undo Object Allocation Failed", ZStatUnitOpsPerSecond);

ZObjectAllocator::ZObjectAllocator(uint nworkers) :
    _nworkers(nworkers),
    _used(0),
    _shared_medium_page(NULL),
    _shared_small_page(NULL),
    _worker_small_page(NULL) {}

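// Allocates a page from the heap and accounts for it in this allocator's
// per-CPU used counter. Returns NULL if the page allocation failed.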
ZPage* ZObjectAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = ZHeap::heap()->alloc_page(type, size, flags);
  if (page != NULL) {
    // Increment used bytes
    Atomic::add(size, _used.addr());
  }

  return page;
}

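// Lock-free allocation in a page shared by multiple threads. The fast path
// bump-allocates atomically in the currently installed page. On failure, a
// new page is allocated, the object is allocated in it (non-atomically, since
// the page is not yet visible to other threads), and the page is then
// installed with a CAS. If the CAS loses to a concurrently installed page,
// allocation is first attempted in that page, and on success the new page is
// undone; otherwise the install is retried.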
uintptr_t ZObjectAllocator::alloc_object_in_shared_page(ZPage** shared_page,
                                                        uint8_t page_type,
                                                        size_t page_size,
                                                        size_t size,
                                                        ZAllocationFlags flags) {
  uintptr_t addr = 0;
  ZPage* page = *shared_page;

  if (page != NULL) {
    addr = page->alloc_object_atomic(size);
  }

  if (addr == 0) {
    // Allocate new page
    ZPage* const new_page = alloc_page(page_type, page_size, flags);
    if (new_page != NULL) {
      // Allocate object before installing the new page
      addr = new_page->alloc_object(size);

    retry:
      // Install new page
      ZPage* const prev_page = Atomic::cmpxchg(new_page, shared_page, page);
      if (prev_page != page) {
        if (prev_page == NULL) {
          // Previous page was retired, retry installing the new page
          page = prev_page;
          goto retry;
        }

        // Another page already installed, try allocation there first
        const uintptr_t prev_addr = prev_page->alloc_object_atomic(size);
        if (prev_addr == 0) {
          // Allocation failed, retry installing the new page
          page = prev_page;
          goto retry;
        }

        // Allocation succeeded in already installed page
        addr = prev_addr;

        // Undo new page allocation
        ZHeap::heap()->undo_alloc_page(new_page);
      }
    }
  }

  return addr;
}

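// Large objects are allocated in a page of their own, sized up to the
// nearest granule multiple, so no atomic allocation is needed.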
uintptr_t ZObjectAllocator::alloc_large_object(size_t size, ZAllocationFlags flags) {
  assert(ZThread::is_java(), "Should be a Java thread");

  uintptr_t addr = 0;

  // Allocate new large page
  const size_t page_size = align_up(size, ZGranuleSize);
  ZPage* const page = alloc_page(ZPageTypeLarge, page_size, flags);
  if (page != NULL) {
    // Allocate the object
    addr = page->alloc_object(size);
  }

  return addr;
}

uintptr_t ZObjectAllocator::alloc_medium_object(size_t size, ZAllocationFlags flags) {
  return alloc_object_in_shared_page(_shared_medium_page.addr(), ZPageTypeMedium, ZPageSizeMedium, size, flags);
}

uintptr_t ZObjectAllocator::alloc_small_object_from_nonworker(size_t size, ZAllocationFlags flags) {
  assert(ZThread::is_java() || ZThread::is_vm() || ZThread::is_runtime_worker(),
         "Should be a Java, VM or Runtime worker thread");

  // Non-worker small page allocation can never use the reserve
  flags.set_no_reserve();

  return alloc_object_in_shared_page(_shared_small_page.addr(), ZPageTypeSmall, ZPageSizeSmall, size, flags);
}

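// Worker threads allocate small objects from a worker-local page, so plain
// non-atomic allocation is sufficient.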
uintptr_t ZObjectAllocator::alloc_small_object_from_worker(size_t size, ZAllocationFlags flags) {
  assert(ZThread::is_worker(), "Should be a worker thread");

  ZPage* page = _worker_small_page.get();
  uintptr_t addr = 0;

  if (page != NULL) {
    addr = page->alloc_object(size);
  }

  if (addr == 0) {
    // Allocate new page
    page = alloc_page(ZPageTypeSmall, ZPageSizeSmall, flags);
    if (page != NULL) {
      addr = page->alloc_object(size);
    }
    _worker_small_page.set(page);
  }

  return addr;
}

uintptr_t ZObjectAllocator::alloc_small_object(size_t size, ZAllocationFlags flags) {
  if (flags.worker_thread()) {
    return alloc_small_object_from_worker(size, flags);
  } else {
    return alloc_small_object_from_nonworker(size, flags);
  }
}

uintptr_t ZObjectAllocator::alloc_object(size_t size, ZAllocationFlags flags) {
  if (size <= ZObjectSizeLimitSmall) {
    // Small
    return alloc_small_object(size, flags);
  } else if (size <= ZObjectSizeLimitMedium) {
    // Medium
    return alloc_medium_object(size, flags);
  } else {
    // Large
    return alloc_large_object(size, flags);
  }
}

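// External entry point for Java thread allocation. Never allocates from the
// reserve, and blocks on allocation failure only if ZStallOnOutOfMemory is
// enabled.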
uintptr_t ZObjectAllocator::alloc_object(size_t size) {
  assert(ZThread::is_java(), "Must be a Java thread");

  ZAllocationFlags flags;
  flags.set_no_reserve();

  if (!ZStallOnOutOfMemory) {
    flags.set_non_blocking();
  }

  return alloc_object(size, flags);
}

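// Entry point for allocations made when relocating objects. These allocations
// are non-blocking, and worker threads allocate from their worker-local small
// pages.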
uintptr_t ZObjectAllocator::alloc_object_for_relocation(size_t size) {
  assert(ZThread::is_java() || ZThread::is_vm() || ZThread::is_worker() || ZThread::is_runtime_worker(),
         "Unknown thread");

  ZAllocationFlags flags;
  flags.set_relocation();
  flags.set_non_blocking();

  if (ZThread::is_worker()) {
    flags.set_worker_thread();
  }

  return alloc_object(size, flags);
}

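// Undo support, used to back out an allocation made for relocation when the
// copy turns out not to be needed, for example when another thread wins the
// race to relocate the object. Undo succeeds only if the allocation is still
// the most recent one in its page; otherwise the memory is left unused until
// the page is reclaimed.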
bool ZObjectAllocator::undo_alloc_large_object(ZPage* page) {
  assert(page->type() == ZPageTypeLarge, "Invalid page type");

  // Undo page allocation
  ZHeap::heap()->undo_alloc_page(page);
  return true;
}

bool ZObjectAllocator::undo_alloc_medium_object(ZPage* page, uintptr_t addr, size_t size) {
  assert(page->type() == ZPageTypeMedium, "Invalid page type");

  // Try atomic undo on shared page
  return page->undo_alloc_object_atomic(addr, size);
}

bool ZObjectAllocator::undo_alloc_small_object_from_nonworker(ZPage* page, uintptr_t addr, size_t size) {
  assert(page->type() == ZPageTypeSmall, "Invalid page type");

  // Try atomic undo on shared page
  return page->undo_alloc_object_atomic(addr, size);
}

bool ZObjectAllocator::undo_alloc_small_object_from_worker(ZPage* page, uintptr_t addr, size_t size) {
  assert(page->type() == ZPageTypeSmall, "Invalid page type");
  assert(page == _worker_small_page.get(), "Invalid page");

  // Non-atomic undo on worker-local page
  const bool success = page->undo_alloc_object(addr, size);
  assert(success, "Should always succeed");
  return success;
}

bool ZObjectAllocator::undo_alloc_small_object(ZPage* page, uintptr_t addr, size_t size) {
  if (ZThread::is_worker()) {
    return undo_alloc_small_object_from_worker(page, addr, size);
  } else {
    return undo_alloc_small_object_from_nonworker(page, addr, size);
  }
}

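// Dispatches undo on the page type of the original allocation.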
bool ZObjectAllocator::undo_alloc_object(ZPage* page, uintptr_t addr, size_t size) {
  const uint8_t type = page->type();

  if (type == ZPageTypeSmall) {
    return undo_alloc_small_object(page, addr, size);
  } else if (type == ZPageTypeMedium) {
    return undo_alloc_medium_object(page, addr, size);
  } else {
    return undo_alloc_large_object(page);
  }
}

void ZObjectAllocator::undo_alloc_object_for_relocation(ZPage* page, uintptr_t addr, size_t size) {
  if (undo_alloc_object(page, addr, size)) {
    ZStatInc(ZCounterUndoObjectAllocationSucceeded);
  } else {
    ZStatInc(ZCounterUndoObjectAllocationFailed);
    log_trace(gc)("Failed to undo object allocation: " PTR_FORMAT ", Size: " SIZE_FORMAT ", Thread: " PTR_FORMAT " (%s)",
                  addr, size, ZThread::id(), ZThread::name());
  }
}

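// Sums the per-CPU used counters. The sum races with concurrent allocation
// and is therefore only approximate.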
size_t ZObjectAllocator::used() const {
  size_t total_used = 0;

  ZPerCPUConstIterator<size_t> iter(&_used);
  for (const size_t* cpu_used; iter.next(&cpu_used);) {
    total_used += *cpu_used;
  }

  return total_used;
}

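// Returns the number of bytes remaining in the current shared small page,
// if one is installed.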
size_t ZObjectAllocator::remaining() const {
  assert(ZThread::is_java(), "Should be a Java thread");

  ZPage* page = _shared_small_page.get();
  if (page != NULL) {
    return page->remaining();
  }

  return 0;
}

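// Called at a safepoint to reset the used counters and drop all cached
// allocation pages, so that subsequent allocations install fresh pages.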
void ZObjectAllocator::retire_pages() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Reset used
  _used.set_all(0);

  // Reset allocation pages
  _shared_medium_page.set(NULL);
  _shared_small_page.set_all(NULL);
  _worker_small_page.set_all(NULL);
}