/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zHeuristics.hpp"
#include "gc/z/zObjectAllocator.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zThread.inline.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "gc/z/zValue.inline.hpp"
#include "logging/log.hpp"
#include "runtime/atomic.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
#include "runtime/threadSMR.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

static const ZStatCounter ZCounterUndoObjectAllocationSucceeded("Memory", "Undo Object Allocation Succeeded", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterUndoObjectAllocationFailed("Memory", "Undo Object Allocation Failed", ZStatUnitOpsPerSecond);

ZObjectAllocator::ZObjectAllocator() :
    _use_per_cpu_shared_small_pages(ZHeuristics::use_per_cpu_shared_small_pages()),
    _used(0),
    _undone(0),
    _shared_medium_page(NULL),
    _shared_small_page(NULL),
    _worker_small_page(NULL) {}

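// Returns the address of the shared small page slot to allocate from. When
// per-CPU shared small pages are disabled, all CPUs use the slot for CPU 0.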
ZPage** ZObjectAllocator::shared_small_page_addr() {
  return _use_per_cpu_shared_small_pages ? _shared_small_page.addr() : _shared_small_page.addr(0);
}

ZPage* const* ZObjectAllocator::shared_small_page_addr() const {
  return _use_per_cpu_shared_small_pages ? _shared_small_page.addr() : _shared_small_page.addr(0);
}

ZPage* ZObjectAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = ZHeap::heap()->alloc_page(type, size, flags);
  if (page != NULL) {
    // Increment used bytes
    Atomic::add(_used.addr(), size);
  }

  return page;
}

void ZObjectAllocator::undo_alloc_page(ZPage* page) {
  // Increment undone bytes
  Atomic::add(_undone.addr(), page->size());

  ZHeap::heap()->undo_alloc_page(page);
}

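// Lock-free allocation in a shared page. First try the currently installed
// page. If that fails, allocate a new page, allocate the object in it before
// publishing, and install the page with a CAS. If another thread installs a
// page first, try allocating in that page instead and undo the new page
// allocation if that succeeds.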
uintptr_t ZObjectAllocator::alloc_object_in_shared_page(ZPage** shared_page,
                                                        uint8_t page_type,
                                                        size_t page_size,
                                                        size_t size,
                                                        ZAllocationFlags flags) {
  uintptr_t addr = 0;
  ZPage* page = Atomic::load_acquire(shared_page);

  if (page != NULL) {
    addr = page->alloc_object_atomic(size);
  }

  if (addr == 0) {
    // Allocate new page
    ZPage* const new_page = alloc_page(page_type, page_size, flags);
    if (new_page != NULL) {
      // Allocate object before installing the new page
      addr = new_page->alloc_object(size);

    retry:
      // Install new page
      ZPage* const prev_page = Atomic::cmpxchg(shared_page, page, new_page);
      if (prev_page != page) {
        if (prev_page == NULL) {
          // Previous page was retired, retry installing the new page
          page = prev_page;
          goto retry;
        }

        // Another page already installed, try allocation there first
        const uintptr_t prev_addr = prev_page->alloc_object_atomic(size);
        if (prev_addr == 0) {
          // Allocation failed, retry installing the new page
          page = prev_page;
          goto retry;
        }

        // Allocation succeeded in already installed page
        addr = prev_addr;

        // Undo new page allocation
        undo_alloc_page(new_page);
      }
    }
  }

  return addr;
}

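// Each large object gets its own page, sized up to the nearest multiple
// of the granule size.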
uintptr_t ZObjectAllocator::alloc_large_object(size_t size, ZAllocationFlags flags) {
  assert(ZThread::is_java(), "Should be a Java thread");

  uintptr_t addr = 0;

  // Allocate new large page
  const size_t page_size = align_up(size, ZGranuleSize);
  ZPage* const page = alloc_page(ZPageTypeLarge, page_size, flags);
  if (page != NULL) {
    // Allocate the object
    addr = page->alloc_object(size);
  }

  return addr;
}

uintptr_t ZObjectAllocator::alloc_medium_object(size_t size, ZAllocationFlags flags) {
  return alloc_object_in_shared_page(_shared_medium_page.addr(), ZPageTypeMedium, ZPageSizeMedium, size, flags);
}

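// Small object allocation from non-worker threads, using the shared
// (per-CPU) small pages with atomic allocation.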
uintptr_t ZObjectAllocator::alloc_small_object_from_nonworker(size_t size, ZAllocationFlags flags) {
  assert(ZThread::is_java() || ZThread::is_vm() || ZThread::is_runtime_worker(),
         "Should be a Java, VM or Runtime worker thread");

  // Non-worker small page allocation can never use the reserve
  flags.set_no_reserve();

  return alloc_object_in_shared_page(shared_small_page_addr(), ZPageTypeSmall, ZPageSizeSmall, size, flags);
}

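// Small object allocation from GC worker threads, using a per-worker page
// that only the owning worker allocates from, so non-atomic allocation is
// safe. When the page fills up it is replaced by a new page.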
uintptr_t ZObjectAllocator::alloc_small_object_from_worker(size_t size, ZAllocationFlags flags) {
  assert(ZThread::is_worker(), "Should be a worker thread");

  ZPage* page = _worker_small_page.get();
  uintptr_t addr = 0;

  if (page != NULL) {
    addr = page->alloc_object(size);
  }

  if (addr == 0) {
    // Allocate new page
    page = alloc_page(ZPageTypeSmall, ZPageSizeSmall, flags);
    if (page != NULL) {
      addr = page->alloc_object(size);
    }
    _worker_small_page.set(page);
  }

  return addr;
}

uintptr_t ZObjectAllocator::alloc_small_object(size_t size, ZAllocationFlags flags) {
  if (flags.worker_thread()) {
    return alloc_small_object_from_worker(size, flags);
  } else {
    return alloc_small_object_from_nonworker(size, flags);
  }
}

uintptr_t ZObjectAllocator::alloc_object(size_t size, ZAllocationFlags flags) {
  if (size <= ZObjectSizeLimitSmall) {
    // Small
    return alloc_small_object(size, flags);
  } else if (size <= ZObjectSizeLimitMedium) {
    // Medium
    return alloc_medium_object(size, flags);
  } else {
    // Large
    return alloc_large_object(size, flags);
  }
}

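// Allocation from a Java thread. Such allocations never use the reserve.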
uintptr_t ZObjectAllocator::alloc_object(size_t size) {
  assert(ZThread::is_java(), "Must be a Java thread");

  ZAllocationFlags flags;
  flags.set_no_reserve();

  return alloc_object(size, flags);
}

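// Allocation performed as part of relocation. These allocations are
// non-blocking, and small allocations from GC worker threads use the
// per-worker pages.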
uintptr_t ZObjectAllocator::alloc_object_for_relocation(size_t size) {
  assert(ZThread::is_java() || ZThread::is_vm() || ZThread::is_worker() || ZThread::is_runtime_worker(),
         "Unknown thread");

  ZAllocationFlags flags;
  flags.set_relocation();
  flags.set_non_blocking();

  if (ZThread::is_worker()) {
    flags.set_worker_thread();
  }

  return alloc_object(size, flags);
}

bool ZObjectAllocator::undo_alloc_large_object(ZPage* page) {
  assert(page->type() == ZPageTypeLarge, "Invalid page type");

  // Undo page allocation
  undo_alloc_page(page);
  return true;
}

bool ZObjectAllocator::undo_alloc_medium_object(ZPage* page, uintptr_t addr, size_t size) {
  assert(page->type() == ZPageTypeMedium, "Invalid page type");

  // Try atomic undo on shared page
  return page->undo_alloc_object_atomic(addr, size);
}

bool ZObjectAllocator::undo_alloc_small_object_from_nonworker(ZPage* page, uintptr_t addr, size_t size) {
  assert(page->type() == ZPageTypeSmall, "Invalid page type");

  // Try atomic undo on shared page
  return page->undo_alloc_object_atomic(addr, size);
}

bool ZObjectAllocator::undo_alloc_small_object_from_worker(ZPage* page, uintptr_t addr, size_t size) {
  assert(page->type() == ZPageTypeSmall, "Invalid page type");
  assert(page == _worker_small_page.get(), "Invalid page");

  // Non-atomic undo on worker-local page
  const bool success = page->undo_alloc_object(addr, size);
  assert(success, "Should always succeed");
  return success;
}

bool ZObjectAllocator::undo_alloc_small_object(ZPage* page, uintptr_t addr, size_t size) {
  if (ZThread::is_worker()) {
    return undo_alloc_small_object_from_worker(page, addr, size);
  } else {
    return undo_alloc_small_object_from_nonworker(page, addr, size);
  }
}

bool ZObjectAllocator::undo_alloc_object(ZPage* page, uintptr_t addr, size_t size) {
  const uint8_t type = page->type();

  if (type == ZPageTypeSmall) {
    return undo_alloc_small_object(page, addr, size);
  } else if (type == ZPageTypeMedium) {
    return undo_alloc_medium_object(page, addr, size);
  } else {
    return undo_alloc_large_object(page);
  }
}

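// Best-effort undo of an allocation made for relocation. Success and
// failure are recorded in the undo stat counters.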
void ZObjectAllocator::undo_alloc_object_for_relocation(ZPage* page, uintptr_t addr, size_t size) {
  if (undo_alloc_object(page, addr, size)) {
    ZStatInc(ZCounterUndoObjectAllocationSucceeded);
  } else {
    ZStatInc(ZCounterUndoObjectAllocationFailed);
    log_trace(gc)("Failed to undo object allocation: " PTR_FORMAT ", Size: " SIZE_FORMAT ", Thread: " PTR_FORMAT " (%s)",
                  addr, size, ZThread::id(), ZThread::name());
  }
}

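// Returns used bytes, computed as the sum of allocated bytes minus
// undone bytes across all CPUs.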
size_t ZObjectAllocator::used() const {
  size_t total_used = 0;
  size_t total_undone = 0;

  ZPerCPUConstIterator<size_t> iter_used(&_used);
  for (const size_t* cpu_used; iter_used.next(&cpu_used);) {
    total_used += *cpu_used;
  }

  ZPerCPUConstIterator<size_t> iter_undone(&_undone);
  for (const size_t* cpu_undone; iter_undone.next(&cpu_undone);) {
    total_undone += *cpu_undone;
  }

  return total_used - total_undone;
}

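// Returns the number of bytes remaining in the currently installed
// shared small page, or 0 if no page is installed.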
size_t ZObjectAllocator::remaining() const {
  assert(ZThread::is_java(), "Should be a Java thread");

  const ZPage* const page = Atomic::load_acquire(shared_small_page_addr());
  if (page != NULL) {
    return page->remaining();
  }

  return 0;
}

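// Called at a safepoint to drop the cached allocation pages, so that new
// allocations start in fresh pages, and to reset the used/undone counters.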
void ZObjectAllocator::retire_pages() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Reset used and undone bytes
  _used.set_all(0);
  _undone.set_all(0);

  // Reset allocation pages
  _shared_medium_page.set(NULL);
  _shared_small_page.set_all(NULL);
  _worker_small_page.set_all(NULL);
}