/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zObjectAllocator.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zThread.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "logging/log.hpp"
#include "runtime/atomic.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
#include "runtime/threadSMR.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

static const ZStatCounter ZCounterUndoObjectAllocationSucceeded("Memory", "Undo Object Allocation Succeeded", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterUndoObjectAllocationFailed("Memory", "Undo Object Allocation Failed", ZStatUnitOpsPerSecond);
static const ZStatSubPhase ZSubPhasePauseRetireTLABS("Pause Retire TLABS");
static const ZStatSubPhase ZSubPhasePauseRemapTLABS("Pause Remap TLABS");

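// The object allocator hands out memory from three kinds of pages: a single
// medium page shared by all threads, shared small pages maintained per CPU,
// and small pages private to each GC worker thread. Used bytes are tracked
// in a per-CPU counter to reduce contention on the hot allocation path.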
ZObjectAllocator::ZObjectAllocator(uint nworkers) :
    _nworkers(nworkers),
    _used(0),
    _shared_medium_page(NULL),
    _shared_small_page(NULL),
    _worker_small_page(NULL) {}

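// Allocate a page from the heap and account its size in the used counter.
// Returns NULL if the page allocation failed.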
ZPage* ZObjectAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = ZHeap::heap()->alloc_page(type, size, flags);
  if (page != NULL) {
    // Increment used bytes
    Atomic::add(size, _used.addr());
  }

  return page;
}

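// Lock-free allocation in a page shared by multiple threads. First try an
// atomic allocation in the currently installed page. If that fails, allocate
// a new page, carve out the object before publishing the page, and then try
// to install the page with a CAS. If the CAS loses, either the old page was
// retired (NULL), in which case we retry the install, or another thread
// installed a page first, in which case we try to allocate there and, on
// success, undo our own page allocation.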
uintptr_t ZObjectAllocator::alloc_object_in_shared_page(ZPage** shared_page,
                                                        uint8_t page_type,
                                                        size_t page_size,
                                                        size_t size,
                                                        ZAllocationFlags flags) {
  uintptr_t addr = 0;
  ZPage* page = *shared_page;

  if (page != NULL) {
    addr = page->alloc_object_atomic(size);
  }

  if (addr == 0) {
    // Allocate new page
    ZPage* const new_page = alloc_page(page_type, page_size, flags);
    if (new_page != NULL) {
      // Allocate object before installing the new page
      addr = new_page->alloc_object(size);

    retry:
      // Install new page
      ZPage* const prev_page = Atomic::cmpxchg(new_page, shared_page, page);
      if (prev_page != page) {
        if (prev_page == NULL) {
          // Previous page was retired, retry installing the new page
          page = prev_page;
          goto retry;
        }

        // Another page already installed, try allocation there first
        const uintptr_t prev_addr = prev_page->alloc_object_atomic(size);
        if (prev_addr == 0) {
          // Allocation failed, retry installing the new page
          page = prev_page;
          goto retry;
        }

        // Allocation succeeded in already installed page
        addr = prev_addr;

        // Undo new page allocation
        ZHeap::heap()->undo_alloc_page(new_page);
      }
    }
  }

  return addr;
}

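// A large object gets a page of its own. The page size is the object size
// rounded up to a multiple of ZPageSizeMin.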
uintptr_t ZObjectAllocator::alloc_large_object(size_t size, ZAllocationFlags flags) {
  assert(ZThread::is_java(), "Should be a Java thread");

  uintptr_t addr = 0;

  // Allocate new large page
  const size_t page_size = align_up(size, ZPageSizeMin);
  ZPage* const page = alloc_page(ZPageTypeLarge, page_size, flags);
  if (page != NULL) {
    // Allocate the object
    addr = page->alloc_object(size);
  }

  return addr;
}

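// Medium objects are allocated in a single page shared by all threads.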
uintptr_t ZObjectAllocator::alloc_medium_object(size_t size, ZAllocationFlags flags) {
  return alloc_object_in_shared_page(_shared_medium_page.addr(), ZPageTypeMedium, ZPageSizeMedium, size, flags);
}

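// Small object allocation from a Java or VM thread. For relocations, Java
// threads first try the thread's local TLAB and only fall back to the shared
// per-CPU small page if the TLAB is exhausted.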
uintptr_t ZObjectAllocator::alloc_small_object_from_nonworker(size_t size, ZAllocationFlags flags) {
  assert(ZThread::is_java() || ZThread::is_vm(), "Should be a Java or VM thread");

  if (flags.relocation() && flags.java_thread()) {
    // For relocations from Java threads, try TLAB allocation first
    const uintptr_t addr = (uintptr_t)Thread::current()->tlab().allocate(ZUtils::bytes_to_words(size));
    if (addr != 0) {
      return addr;
    }
  }

  // Non-worker small page allocation can never use the reserve
  flags.set_no_reserve();

  return alloc_object_in_shared_page(_shared_small_page.addr(), ZPageTypeSmall, ZPageSizeSmall, size, flags);
}

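// Small object allocation from a GC worker thread. The worker owns its page
// exclusively, so the non-atomic alloc_object() can be used. Note that a NULL
// page is installed if the page allocation fails, so the next allocation
// attempt starts over with a fresh page.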
uintptr_t ZObjectAllocator::alloc_small_object_from_worker(size_t size, ZAllocationFlags flags) {
  assert(ZThread::is_worker(), "Should be a worker thread");

  ZPage* page = _worker_small_page.get();
  uintptr_t addr = 0;

  if (page != NULL) {
    addr = page->alloc_object(size);
  }

  if (addr == 0) {
    // Allocate new page
    page = alloc_page(ZPageTypeSmall, ZPageSizeSmall, flags);
    if (page != NULL) {
      addr = page->alloc_object(size);
    }
    _worker_small_page.set(page);
  }

  return addr;
}

uintptr_t ZObjectAllocator::alloc_small_object(size_t size, ZAllocationFlags flags) {
  if (flags.worker_thread()) {
    return alloc_small_object_from_worker(size, flags);
  } else {
    return alloc_small_object_from_nonworker(size, flags);
  }
}

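// Route the allocation to the small, medium or large allocator based on the
// object size. The size class limits are defined in zGlobals.hpp.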
uintptr_t ZObjectAllocator::alloc_object(size_t size, ZAllocationFlags flags) {
  if (size <= ZObjectSizeLimitSmall) {
    // Small
    return alloc_small_object(size, flags);
  } else if (size <= ZObjectSizeLimitMedium) {
    // Medium
    return alloc_medium_object(size, flags);
  } else {
    // Large
    return alloc_large_object(size, flags);
  }
}

uintptr_t ZObjectAllocator::alloc_object(size_t size) {
  assert(ZThread::is_java(), "Must be a Java thread");

  ZAllocationFlags flags;
  flags.set_java_thread();
  flags.set_no_reserve();

  if (!ZStallOnOutOfMemory) {
    flags.set_non_blocking();
  }

  return alloc_object(size, flags);
}

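// Allocations for relocation are always non-blocking. Unlike the normal
// Java-thread allocation path above, the no_reserve flag is not set here,
// which allows these allocations to use the page allocator's reserve (the
// non-worker small page path still sets the flag explicitly).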
uintptr_t ZObjectAllocator::alloc_object_for_relocation(size_t size) {
  assert(ZThread::is_java() || ZThread::is_worker() || ZThread::is_vm(), "Unknown thread");

  ZAllocationFlags flags;
  flags.set_relocation();
  flags.set_non_blocking();

  if (ZThread::is_worker()) {
    flags.set_worker_thread();
  } else if (ZThread::is_java()) {
    flags.set_java_thread();
  }

  return alloc_object(size, flags);
}

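// Undo paths. An allocation made for relocation can become unnecessary, for
// example when another thread wins the race to relocate the same object, in
// which case the memory is handed back. For a large object the dedicated
// page is simply returned to the heap.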
bool ZObjectAllocator::undo_alloc_large_object(ZPage* page) {
  assert(page->type() == ZPageTypeLarge, "Invalid page type");

  // Undo page allocation
  ZHeap::heap()->undo_alloc_page(page);
  return true;
}

bool ZObjectAllocator::undo_alloc_medium_object(ZPage* page, uintptr_t addr, size_t size) {
  assert(page->type() == ZPageTypeMedium, "Invalid page type");

  // Try atomic undo on shared page
  return page->undo_alloc_object_atomic(addr, size);
}

bool ZObjectAllocator::undo_alloc_small_object_from_nonworker(ZPage* page, uintptr_t addr, size_t size) {
  assert(page->type() == ZPageTypeSmall, "Invalid page type");

  if (ZThread::is_java()) {
    // Try undo allocation in TLAB
    if (Thread::current()->tlab().undo_allocate((HeapWord*)addr, ZUtils::bytes_to_words(size))) {
      return true;
    }
  }

  // Try atomic undo on shared page
  return page->undo_alloc_object_atomic(addr, size);
}

bool ZObjectAllocator::undo_alloc_small_object_from_worker(ZPage* page, uintptr_t addr, size_t size) {
  assert(page->type() == ZPageTypeSmall, "Invalid page type");
  assert(page == _worker_small_page.get(), "Invalid page");

  // Non-atomic undo on worker-local page
  const bool success = page->undo_alloc_object(addr, size);
  assert(success, "Should always succeed");
  return success;
}

bool ZObjectAllocator::undo_alloc_small_object(ZPage* page, uintptr_t addr, size_t size) {
  if (ZThread::is_worker()) {
    return undo_alloc_small_object_from_worker(page, addr, size);
  } else {
    return undo_alloc_small_object_from_nonworker(page, addr, size);
  }
}

bool ZObjectAllocator::undo_alloc_object(ZPage* page, uintptr_t addr, size_t size) {
  const uint8_t type = page->type();

  if (type == ZPageTypeSmall) {
    return undo_alloc_small_object(page, addr, size);
  } else if (type == ZPageTypeMedium) {
    return undo_alloc_medium_object(page, addr, size);
  } else {
    return undo_alloc_large_object(page);
  }
}

void ZObjectAllocator::undo_alloc_object_for_relocation(ZPage* page, uintptr_t addr, size_t size) {
  if (undo_alloc_object(page, addr, size)) {
    ZStatInc(ZCounterUndoObjectAllocationSucceeded);
  } else {
    ZStatInc(ZCounterUndoObjectAllocationFailed);
    log_trace(gc)("Failed to undo object allocation: " PTR_FORMAT ", Size: " SIZE_FORMAT ", Thread: " PTR_FORMAT " (%s)",
                  addr, size, ZThread::id(), ZThread::name());
  }
}

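// Sum up the per-CPU used counters. The sum is not atomic with respect to
// concurrent allocations, so the result is only a snapshot.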
size_t ZObjectAllocator::used() const {
  size_t total_used = 0;

  ZPerCPUConstIterator<size_t> iter(&_used);
  for (const size_t* cpu_used; iter.next(&cpu_used);) {
    total_used += *cpu_used;
  }

  return total_used;
}

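// Number of bytes remaining in the current CPU's shared small page, if any.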
size_t ZObjectAllocator::remaining() const {
  assert(ZThread::is_java(), "Should be a Java thread");

  ZPage* page = _shared_small_page.get();
  if (page != NULL) {
    return page->remaining();
  }

  return 0;
}

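// Retire all TLABs, reset the used counters and drop the current allocation
// pages. Runs inside a safepoint pause, so no allocations are in flight.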
void ZObjectAllocator::retire_tlabs() {
  ZStatTimer timer(ZSubPhasePauseRetireTLABS);
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Retire TLABs
  if (UseTLAB) {
    ZCollectedHeap* heap = ZCollectedHeap::heap();
    heap->accumulate_statistics_all_tlabs();
    heap->ensure_parsability(true /* retire_tlabs */);
    heap->resize_all_tlabs();
  }

  // Reset used
  _used.set_all(0);

  // Reset allocation pages
  _shared_medium_page.set(NULL);
  _shared_small_page.set_all(NULL);
  _worker_small_page.set_all(NULL);
}

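// Remap a TLAB-internal address to its current good color, or to NULL if the
// address is NULL. Applied to every Java thread's TLAB while at a safepoint.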
static void remap_tlab_address(HeapWord** p) {
  *p = (HeapWord*)ZAddress::good_or_null((uintptr_t)*p);
}

void ZObjectAllocator::remap_tlabs() {
  ZStatTimer timer(ZSubPhasePauseRemapTLABS);
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  if (UseTLAB) {
    for (JavaThreadIteratorWithHandle iter; JavaThread* thread = iter.next(); ) {
      thread->tlab().addresses_do(remap_tlab_address);
    }
  }
}