/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zObjectAllocator.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zThread.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "logging/log.hpp"
#include "runtime/atomic.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
#include "runtime/threadSMR.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

static const ZStatCounter ZCounterUndoObjectAllocationSucceeded("Memory", "Undo Object Allocation Succeeded", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterUndoObjectAllocationFailed("Memory", "Undo Object Allocation Failed", ZStatUnitOpsPerSecond);
static const ZStatSubPhase ZSubPhasePauseRetireTLABS("Pause Retire TLABS");
static const ZStatSubPhase ZSubPhasePauseRemapTLABS("Pause Remap TLABS");

ZObjectAllocator::ZObjectAllocator(uint nworkers) :
    _nworkers(nworkers),
    _used(0),
    _shared_medium_page(NULL),
    _shared_small_page(NULL),
    _worker_small_page(NULL) {}

ZPage* ZObjectAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = ZHeap::heap()->alloc_page(type, size, flags);
  if (page != NULL) {
    // Increment used bytes
    Atomic::add(size, _used.addr());
  }

  return page;
}

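// Allocate an object in a page shared by multiple threads. The common case
// is an atomic bump allocation in the currently installed page. On failure,
// a new page is allocated and the object is allocated in it *before* the
// page is published, so that allocation never races. The new page is then
// installed with a CAS; if another thread installed a page first, we try to
// allocate in that page instead and, if that succeeds, undo our own page
// allocation. Otherwise we retry the installation.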
uintptr_t ZObjectAllocator::alloc_object_in_shared_page(ZPage** shared_page,
                                                        uint8_t page_type,
                                                        size_t page_size,
                                                        size_t size,
                                                        ZAllocationFlags flags) {
  uintptr_t addr = 0;
  ZPage* page = *shared_page;

  if (page != NULL) {
    addr = page->alloc_object_atomic(size);
  }

  if (addr == 0) {
    // Allocate new page
    ZPage* const new_page = alloc_page(page_type, page_size, flags);
    if (new_page != NULL) {
      // Allocate object before installing the new page
      addr = new_page->alloc_object(size);

    retry:
      // Install new page
      ZPage* const prev_page = Atomic::cmpxchg(new_page, shared_page, page);
      if (prev_page != page) {
        if (prev_page == NULL) {
          // Previous page was retired, retry installing the new page
          page = prev_page;
          goto retry;
        }

        // Another page already installed, try allocation there first
        const uintptr_t prev_addr = prev_page->alloc_object_atomic(size);
        if (prev_addr == 0) {
          // Allocation failed, retry installing the new page
          page = prev_page;
          goto retry;
        }

        // Allocation succeeded in already installed page
        addr = prev_addr;

        // Undo new page allocation
        ZHeap::heap()->undo_alloc_page(new_page);
      }
    }
  }

  return addr;
}

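// A large object gets a page of its own, sized as the object size aligned
// up to the minimum page granularity.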
uintptr_t ZObjectAllocator::alloc_large_object(size_t size, ZAllocationFlags flags) {
  assert(ZThread::is_java(), "Should be a Java thread");

  uintptr_t addr = 0;

  // Allocate new large page
  const size_t page_size = align_up(size, ZPageSizeMin);
  ZPage* const page = alloc_page(ZPageTypeLarge, page_size, flags);
  if (page != NULL) {
    // Allocate the object
    addr = page->alloc_object(size);
  }

  return addr;
}

uintptr_t ZObjectAllocator::alloc_medium_object(size_t size, ZAllocationFlags flags) {
  return alloc_object_in_shared_page(_shared_medium_page.addr(), ZPageTypeMedium, ZPageSizeMedium, size, flags);
}

uintptr_t ZObjectAllocator::alloc_small_object_from_nonworker(size_t size, ZAllocationFlags flags) {
  assert(ZThread::is_java() || ZThread::is_vm(), "Should be a Java or VM thread");

  // Non-worker small page allocation can never use the reserve
  flags.set_no_reserve();

  return alloc_object_in_shared_page(_shared_small_page.addr(), ZPageTypeSmall, ZPageSizeSmall, size, flags);
}

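// Worker threads allocate from a per-worker small page, so no atomics are
// needed. A page that can no longer satisfy an allocation is simply
// replaced; objects already allocated in it keep it alive.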
uintptr_t ZObjectAllocator::alloc_small_object_from_worker(size_t size, ZAllocationFlags flags) {
  assert(ZThread::is_worker(), "Should be a worker thread");

  ZPage* page = _worker_small_page.get();
  uintptr_t addr = 0;

  if (page != NULL) {
    addr = page->alloc_object(size);
  }

  if (addr == 0) {
    // Allocate new page
    page = alloc_page(ZPageTypeSmall, ZPageSizeSmall, flags);
    if (page != NULL) {
      addr = page->alloc_object(size);
    }
    _worker_small_page.set(page);
  }

  return addr;
}

uintptr_t ZObjectAllocator::alloc_small_object(size_t size, ZAllocationFlags flags) {
  if (flags.worker_thread()) {
    return alloc_small_object_from_worker(size, flags);
  } else {
    return alloc_small_object_from_nonworker(size, flags);
  }
}

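// Dispatch on object size: small and medium objects are bump-allocated in
// shared (or per-worker) pages, while anything larger gets a dedicated
// large page.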
uintptr_t ZObjectAllocator::alloc_object(size_t size, ZAllocationFlags flags) {
  if (size <= ZObjectSizeLimitSmall) {
    // Small
    return alloc_small_object(size, flags);
  } else if (size <= ZObjectSizeLimitMedium) {
    // Medium
    return alloc_medium_object(size, flags);
  } else {
    // Large
    return alloc_large_object(size, flags);
  }
}

uintptr_t ZObjectAllocator::alloc_object(size_t size) {
  assert(ZThread::is_java(), "Must be a Java thread");

  ZAllocationFlags flags;
  flags.set_no_reserve();

  if (!ZStallOnOutOfMemory) {
    flags.set_non_blocking();
  }

  return alloc_object(size, flags);
}

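// Allocations made when relocating objects. These must never block, and
// worker threads are flagged so that their small allocations go to the
// per-worker page rather than the shared one.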
uintptr_t ZObjectAllocator::alloc_object_for_relocation(size_t size) {
  assert(ZThread::is_java() || ZThread::is_worker() || ZThread::is_vm(), "Unknown thread");

  ZAllocationFlags flags;
  flags.set_relocation();
  flags.set_non_blocking();

  if (ZThread::is_worker()) {
    flags.set_worker_thread();
  }

  return alloc_object(size, flags);
}

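// Undo support, used when a relocation race is lost and the speculatively
// allocated copy of an object must be released again. A dedicated large
// page is returned to the heap, while small and medium allocations are
// rolled back with an atomic (shared page) or plain (worker-local page)
// bump-pointer undo.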
bool ZObjectAllocator::undo_alloc_large_object(ZPage* page) {
  assert(page->type() == ZPageTypeLarge, "Invalid page type");

  // Undo page allocation
  ZHeap::heap()->undo_alloc_page(page);
  return true;
}

bool ZObjectAllocator::undo_alloc_medium_object(ZPage* page, uintptr_t addr, size_t size) {
  assert(page->type() == ZPageTypeMedium, "Invalid page type");

  // Try atomic undo on shared page
  return page->undo_alloc_object_atomic(addr, size);
}

bool ZObjectAllocator::undo_alloc_small_object_from_nonworker(ZPage* page, uintptr_t addr, size_t size) {
  assert(page->type() == ZPageTypeSmall, "Invalid page type");

  // Try atomic undo on shared page
  return page->undo_alloc_object_atomic(addr, size);
}

bool ZObjectAllocator::undo_alloc_small_object_from_worker(ZPage* page, uintptr_t addr, size_t size) {
  assert(page->type() == ZPageTypeSmall, "Invalid page type");
  assert(page == _worker_small_page.get(), "Invalid page");

  // Non-atomic undo on worker-local page
  const bool success = page->undo_alloc_object(addr, size);
  assert(success, "Should always succeed");
  return success;
}

bool ZObjectAllocator::undo_alloc_small_object(ZPage* page, uintptr_t addr, size_t size) {
  if (ZThread::is_worker()) {
    return undo_alloc_small_object_from_worker(page, addr, size);
  } else {
    return undo_alloc_small_object_from_nonworker(page, addr, size);
  }
}

bool ZObjectAllocator::undo_alloc_object(ZPage* page, uintptr_t addr, size_t size) {
  const uint8_t type = page->type();

  if (type == ZPageTypeSmall) {
    return undo_alloc_small_object(page, addr, size);
  } else if (type == ZPageTypeMedium) {
    return undo_alloc_medium_object(page, addr, size);
  } else {
    return undo_alloc_large_object(page);
  }
}

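// A failed undo is harmless: the memory is simply left allocated as garbage
// and is reclaimed when the page is eventually recycled. The outcome is
// recorded in the statistics either way.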
void ZObjectAllocator::undo_alloc_object_for_relocation(ZPage* page, uintptr_t addr, size_t size) {
  if (undo_alloc_object(page, addr, size)) {
    ZStatInc(ZCounterUndoObjectAllocationSucceeded);
  } else {
    ZStatInc(ZCounterUndoObjectAllocationFailed);
    log_trace(gc)("Failed to undo object allocation: " PTR_FORMAT ", Size: " SIZE_FORMAT ", Thread: " PTR_FORMAT " (%s)",
                  addr, size, ZThread::id(), ZThread::name());
  }
}

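// Sum the per-CPU counters. The result is a racy snapshot, since other
// threads may be allocating concurrently.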
size_t ZObjectAllocator::used() const {
  size_t total_used = 0;

  ZPerCPUConstIterator<size_t> iter(&_used);
  for (const size_t* cpu_used; iter.next(&cpu_used);) {
    total_used += *cpu_used;
  }

  return total_used;
}

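// Bytes still available in the currently installed shared small page, if
// any. This is an inherently racy estimate.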
size_t ZObjectAllocator::remaining() const {
  assert(ZThread::is_java(), "Should be a Java thread");

  ZPage* page = _shared_small_page.get();
  if (page != NULL) {
    return page->remaining();
  }

  return 0;
}

void ZObjectAllocator::retire_tlabs() {
  ZStatTimer timer(ZSubPhasePauseRetireTLABS);
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Retire TLABs
  if (UseTLAB) {
    ZCollectedHeap* heap = ZCollectedHeap::heap();
    heap->ensure_parsability(true /* retire_tlabs */);
    heap->resize_all_tlabs();
  }

  // Reset used
  _used.set_all(0);

  // Reset allocation pages
  _shared_medium_page.set(NULL);
  _shared_small_page.set_all(NULL);
  _worker_small_page.set_all(NULL);
}

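// Remap a TLAB pointer to its current good (colored) address, leaving NULL
// untouched.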
static void remap_tlab_address(HeapWord** p) {
  *p = (HeapWord*)ZAddress::good_or_null((uintptr_t)*p);
}

void ZObjectAllocator::remap_tlabs() {
  ZStatTimer timer(ZSubPhasePauseRemapTLABS);
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  if (UseTLAB) {
    for (JavaThreadIteratorWithHandle iter; JavaThread* thread = iter.next(); ) {
      thread->tlab().addresses_do(remap_tlab_address);
    }
  }
}