/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zObjectAllocator.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zThread.inline.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "gc/z/zValue.inline.hpp"
#include "logging/log.hpp"
#include "runtime/atomic.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
#include "runtime/threadSMR.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

static const ZStatCounter ZCounterUndoObjectAllocationSucceeded("Memory", "Undo Object Allocation Succeeded", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterUndoObjectAllocationFailed("Memory", "Undo Object Allocation Failed", ZStatUnitOpsPerSecond);

ZObjectAllocator::ZObjectAllocator() :
    _used(0),
    _undone(0),
    _shared_medium_page(NULL),
    _shared_small_page(NULL),
    _worker_small_page(NULL) {}

ZPage* ZObjectAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = ZHeap::heap()->alloc_page(type, size, flags);
  if (page != NULL) {
    // Increment used bytes
    Atomic::add(size, _used.addr());
  }

  return page;
}

void ZObjectAllocator::undo_alloc_page(ZPage* page) {
  // Increment undone bytes
  Atomic::add(page->size(), _undone.addr());

  ZHeap::heap()->undo_alloc_page(page);
}

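// Allocate an object in a page shared by multiple threads. Objects are
// bump-allocated atomically out of the current shared page. When that page
// is exhausted, a new page is allocated and installed with a CAS; a thread
// that loses the installation race first retries the allocation in the
// winner's page, and only undoes its own new page if that retry succeeds.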
uintptr_t ZObjectAllocator::alloc_object_in_shared_page(ZPage** shared_page,
                                                        uint8_t page_type,
                                                        size_t page_size,
                                                        size_t size,
                                                        ZAllocationFlags flags) {
  uintptr_t addr = 0;
  ZPage* page = *shared_page;

  if (page != NULL) {
    addr = page->alloc_object_atomic(size);
  }

  if (addr == 0) {
    // Allocate new page
    ZPage* const new_page = alloc_page(page_type, page_size, flags);
    if (new_page != NULL) {
      // Allocate object before installing the new page
      addr = new_page->alloc_object(size);

    retry:
      // Install new page
      ZPage* const prev_page = Atomic::cmpxchg(new_page, shared_page, page);
      if (prev_page != page) {
        if (prev_page == NULL) {
          // Previous page was retired, retry installing the new page
          page = prev_page;
          goto retry;
        }

        // Another page already installed, try allocation there first
        const uintptr_t prev_addr = prev_page->alloc_object_atomic(size);
        if (prev_addr == 0) {
          // Allocation failed, retry installing the new page
          page = prev_page;
          goto retry;
        }

        // Allocation succeeded in already installed page
        addr = prev_addr;

        // Undo new page allocation
        undo_alloc_page(new_page);
      }
    }
  }

  return addr;
}

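// A large object gets a page of its own, sized up to the nearest multiple
// of ZGranuleSize, so no shared page state is involved.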
uintptr_t ZObjectAllocator::alloc_large_object(size_t size, ZAllocationFlags flags) {
  assert(ZThread::is_java(), "Should be a Java thread");

  uintptr_t addr = 0;

  // Allocate new large page
  const size_t page_size = align_up(size, ZGranuleSize);
  ZPage* const page = alloc_page(ZPageTypeLarge, page_size, flags);
  if (page != NULL) {
    // Allocate the object
    addr = page->alloc_object(size);
  }

  return addr;
}

uintptr_t ZObjectAllocator::alloc_medium_object(size_t size, ZAllocationFlags flags) {
  return alloc_object_in_shared_page(_shared_medium_page.addr(), ZPageTypeMedium, ZPageSizeMedium, size, flags);
}

uintptr_t ZObjectAllocator::alloc_small_object_from_nonworker(size_t size, ZAllocationFlags flags) {
  assert(ZThread::is_java() || ZThread::is_vm() || ZThread::is_runtime_worker(),
         "Should be a Java, VM or Runtime worker thread");

  // Non-worker small page allocation can never use the reserve
  flags.set_no_reserve();

  return alloc_object_in_shared_page(_shared_small_page.addr(), ZPageTypeSmall, ZPageSizeSmall, size, flags);
}

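// GC worker threads allocate from a per-worker small page, so objects can
// be bump-allocated without atomics. A page that can't satisfy an
// allocation is simply replaced by a fresh one.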
uintptr_t ZObjectAllocator::alloc_small_object_from_worker(size_t size, ZAllocationFlags flags) {
  assert(ZThread::is_worker(), "Should be a worker thread");

  ZPage* page = _worker_small_page.get();
  uintptr_t addr = 0;

  if (page != NULL) {
    addr = page->alloc_object(size);
  }

  if (addr == 0) {
    // Allocate new page
    page = alloc_page(ZPageTypeSmall, ZPageSizeSmall, flags);
    if (page != NULL) {
      addr = page->alloc_object(size);
    }
    _worker_small_page.set(page);
  }

  return addr;
}

uintptr_t ZObjectAllocator::alloc_small_object(size_t size, ZAllocationFlags flags) {
  if (flags.worker_thread()) {
    return alloc_small_object_from_worker(size, flags);
  } else {
    return alloc_small_object_from_nonworker(size, flags);
  }
}

uintptr_t ZObjectAllocator::alloc_object(size_t size, ZAllocationFlags flags) {
  if (size <= ZObjectSizeLimitSmall) {
    // Small
    return alloc_small_object(size, flags);
  } else if (size <= ZObjectSizeLimitMedium) {
    // Medium
    return alloc_medium_object(size, flags);
  } else {
    // Large
    return alloc_large_object(size, flags);
  }
}

uintptr_t ZObjectAllocator::alloc_object(size_t size) {
  assert(ZThread::is_java(), "Must be a Java thread");

  ZAllocationFlags flags;
  flags.set_no_reserve();

  return alloc_object(size, flags);
}

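// Allocations made for relocation must not block; on failure the relocation
// code deals with it instead of the thread stalling in the allocator. Worker
// threads are flagged so that small-object allocations use their per-worker
// page.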
uintptr_t ZObjectAllocator::alloc_object_for_relocation(size_t size) {
  assert(ZThread::is_java() || ZThread::is_vm() || ZThread::is_worker() || ZThread::is_runtime_worker(),
         "Unknown thread");

  ZAllocationFlags flags;
  flags.set_relocation();
  flags.set_non_blocking();

  if (ZThread::is_worker()) {
    flags.set_worker_thread();
  }

  return alloc_object(size, flags);
}

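// Undo paths, used when a relocation race makes a freshly allocated copy of
// an object redundant. Undo on a shared page is atomic and can fail if other
// allocations have happened in the meantime; undo on a worker-local page is
// non-atomic and always succeeds.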
bool ZObjectAllocator::undo_alloc_large_object(ZPage* page) {
  assert(page->type() == ZPageTypeLarge, "Invalid page type");

  // Undo page allocation
  undo_alloc_page(page);
  return true;
}

bool ZObjectAllocator::undo_alloc_medium_object(ZPage* page, uintptr_t addr, size_t size) {
  assert(page->type() == ZPageTypeMedium, "Invalid page type");

  // Try atomic undo on shared page
  return page->undo_alloc_object_atomic(addr, size);
}

bool ZObjectAllocator::undo_alloc_small_object_from_nonworker(ZPage* page, uintptr_t addr, size_t size) {
  assert(page->type() == ZPageTypeSmall, "Invalid page type");

  // Try atomic undo on shared page
  return page->undo_alloc_object_atomic(addr, size);
}

bool ZObjectAllocator::undo_alloc_small_object_from_worker(ZPage* page, uintptr_t addr, size_t size) {
  assert(page->type() == ZPageTypeSmall, "Invalid page type");
  assert(page == _worker_small_page.get(), "Invalid page");

  // Non-atomic undo on worker-local page
  const bool success = page->undo_alloc_object(addr, size);
  assert(success, "Should always succeed");
  return success;
}

bool ZObjectAllocator::undo_alloc_small_object(ZPage* page, uintptr_t addr, size_t size) {
  if (ZThread::is_worker()) {
    return undo_alloc_small_object_from_worker(page, addr, size);
  } else {
    return undo_alloc_small_object_from_nonworker(page, addr, size);
  }
}

bool ZObjectAllocator::undo_alloc_object(ZPage* page, uintptr_t addr, size_t size) {
  const uint8_t type = page->type();

  if (type == ZPageTypeSmall) {
    return undo_alloc_small_object(page, addr, size);
  } else if (type == ZPageTypeMedium) {
    return undo_alloc_medium_object(page, addr, size);
  } else {
    return undo_alloc_large_object(page);
  }
}

void ZObjectAllocator::undo_alloc_object_for_relocation(ZPage* page, uintptr_t addr, size_t size) {
  if (undo_alloc_object(page, addr, size)) {
    ZStatInc(ZCounterUndoObjectAllocationSucceeded);
  } else {
    ZStatInc(ZCounterUndoObjectAllocationFailed);
    log_trace(gc)("Failed to undo object allocation: " PTR_FORMAT ", Size: " SIZE_FORMAT ", Thread: " PTR_FORMAT " (%s)",
                  addr, size, ZThread::id(), ZThread::name());
  }
}

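// _used and _undone are per-CPU counters. Undone bytes are tracked separately
// rather than subtracted from _used, presumably because an undo can happen on
// a different CPU than the original allocation; summing both and taking the
// difference keeps each per-CPU counter monotonically increasing.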
size_t ZObjectAllocator::used() const {
  size_t total_used = 0;
  size_t total_undone = 0;

  ZPerCPUConstIterator<size_t> iter_used(&_used);
  for (const size_t* cpu_used; iter_used.next(&cpu_used);) {
    total_used += *cpu_used;
  }

  ZPerCPUConstIterator<size_t> iter_undone(&_undone);
  for (const size_t* cpu_undone; iter_undone.next(&cpu_undone);) {
    total_undone += *cpu_undone;
  }

  return total_used - total_undone;
}

size_t ZObjectAllocator::remaining() const {
  assert(ZThread::is_java(), "Should be a Java thread");

  ZPage* page = _shared_small_page.get();
  if (page != NULL) {
    return page->remaining();
  }

  return 0;
}

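// Called at a safepoint to retire all allocation pages. Clearing the page
// pointers forces subsequent allocations to grab fresh pages, and the
// used/undone counters are reset for the next cycle.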
void ZObjectAllocator::retire_pages() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Reset used and undone bytes
  _used.set_all(0);
  _undone.set_all(0);

  // Reset allocation pages
  _shared_medium_page.set(NULL);
  _shared_small_page.set_all(NULL);
  _worker_small_page.set_all(NULL);
}