/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zNMethod.hpp"
#include "gc/z/zObjArrayAllocator.hpp"
#include "gc/z/zServiceability.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "memory/universe.hpp"
#include "runtime/mutexLocker.hpp"

ZCollectedHeap* ZCollectedHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ZCollectedHeap::heap()");
  assert(heap->kind() == CollectedHeap::Z, "Invalid name");
  return (ZCollectedHeap*)heap;
}

ZCollectedHeap::ZCollectedHeap() :
    _soft_ref_policy(),
    _barrier_set(),
    _initialize(&_barrier_set),
    _heap(),
    _director(new ZDirector()),
    _driver(new ZDriver()),
    _uncommitter(new ZUncommitter()),
    _stat(new ZStat()),
    _runtime_workers() {}

CollectedHeap::Name ZCollectedHeap::kind() const {
  return CollectedHeap::Z;
}

const char* ZCollectedHeap::name() const {
  return ZName;
}

jint ZCollectedHeap::initialize() {
  if (!_heap.is_initialized()) {
    return JNI_ENOMEM;
  }

  initialize_reserved_region((HeapWord*)ZAddressReservedStart,
                             (HeapWord*)ZAddressReservedEnd);

  return JNI_OK;
}

void ZCollectedHeap::initialize_serviceability() {
  _heap.serviceability_initialize();
}

void ZCollectedHeap::stop() {
  _director->stop();
  _driver->stop();
  _uncommitter->stop();
  _stat->stop();
}

SoftRefPolicy* ZCollectedHeap::soft_ref_policy() {
  return &_soft_ref_policy;
}

size_t ZCollectedHeap::max_capacity() const {
  return _heap.max_capacity();
}

size_t ZCollectedHeap::capacity() const {
  return _heap.capacity();
}

size_t ZCollectedHeap::used() const {
  return _heap.used();
}

size_t ZCollectedHeap::unused() const {
  return _heap.unused();
}

bool ZCollectedHeap::is_maximal_no_gc() const {
  // Not supported
  ShouldNotReachHere();
  return false;
}

bool ZCollectedHeap::is_in(const void* p) const {
  return _heap.is_in((uintptr_t)p);
}

uint32_t ZCollectedHeap::hash_oop(oop obj) const {
  return _heap.hash_oop(obj);
}

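// Note: the allocation entry points below receive sizes in heap words and
// convert them to bytes before forwarding to ZHeap. allocate_new_tlab() makes
// no attempt to fall back towards min_size; the TLAB is either allocated at
// requested_size or the request fails and NULL is returned.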
HeapWord* ZCollectedHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
  const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(requested_size));
  const uintptr_t addr = _heap.alloc_tlab(size_in_bytes);

  if (addr != 0) {
    *actual_size = requested_size;
  }

  return (HeapWord*)addr;
}

oop ZCollectedHeap::array_allocate(Klass* klass, int size, int length, bool do_zero, TRAPS) {
  if (!do_zero) {
    return CollectedHeap::array_allocate(klass, size, length, false /* do_zero */, THREAD);
  }

  ZObjArrayAllocator allocator(klass, size, length, THREAD);
  return allocator.allocate();
}

HeapWord* ZCollectedHeap::mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) {
  const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(size));
  return (HeapWord*)_heap.alloc_object(size_in_bytes);
}

MetaWord* ZCollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                             size_t size,
                                                             Metaspace::MetadataType mdtype) {
  MetaWord* result;

  // Start asynchronous GC
  collect(GCCause::_metadata_GC_threshold);

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Start synchronous GC
  collect(GCCause::_metadata_GC_clear_soft_refs);

  // Retry allocation
  result = loader_data->metaspace_non_null()->allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Out of memory
  return NULL;
}

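// Explicit and induced GC requests (System.gc(), metadata thresholds, etc.)
// are forwarded to the ZDriver thread, which runs the actual GC cycles.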
void ZCollectedHeap::collect(GCCause::Cause cause) {
  _driver->collect(cause);
}

void ZCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  // These collection requests are ignored since ZGC can't run a synchronous
  // GC cycle from within the VM thread. This is considered benign, since the
  // only GC causes coming in here should be heap dumper and heap inspector.
  // However, neither the heap dumper nor the heap inspector really need a GC
  // to happen, but the result of their heap iterations might in that case be
  // less accurate since they might include objects that would otherwise have
  // been collected by a GC.
  assert(Thread::current()->is_VM_thread(), "Should be the VM thread");
  guarantee(cause == GCCause::_heap_dump ||
            cause == GCCause::_heap_inspection, "Invalid cause");
}

void ZCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  // Not supported
  ShouldNotReachHere();
}

bool ZCollectedHeap::supports_tlab_allocation() const {
  return true;
}

size_t ZCollectedHeap::tlab_capacity(Thread* ignored) const {
  return _heap.tlab_capacity();
}

size_t ZCollectedHeap::tlab_used(Thread* ignored) const {
  return _heap.tlab_used();
}

size_t ZCollectedHeap::max_tlab_size() const {
  return _heap.max_tlab_size();
}

size_t ZCollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  return _heap.unsafe_max_tlab_alloc();
}

bool ZCollectedHeap::can_elide_tlab_store_barriers() const {
  return false;
}

bool ZCollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // Not supported
  ShouldNotReachHere();
  return true;
}

bool ZCollectedHeap::card_mark_must_follow_store() const {
  // Not supported
  ShouldNotReachHere();
  return false;
}

GrowableArray<GCMemoryManager*> ZCollectedHeap::memory_managers() {
  return GrowableArray<GCMemoryManager*>(1, 1, _heap.serviceability_memory_manager());
}

GrowableArray<MemoryPool*> ZCollectedHeap::memory_pools() {
  return GrowableArray<MemoryPool*>(1, 1, _heap.serviceability_memory_pool());
}

void ZCollectedHeap::object_iterate(ObjectClosure* cl) {
  _heap.object_iterate(cl, true /* visit_weaks */);
}

void ZCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
  _heap.object_iterate(cl, true /* visit_weaks */);
}

HeapWord* ZCollectedHeap::block_start(const void* addr) const {
  return (HeapWord*)_heap.block_start((uintptr_t)addr);
}

bool ZCollectedHeap::block_is_obj(const HeapWord* addr) const {
  return _heap.block_is_obj((uintptr_t)addr);
}

void ZCollectedHeap::register_nmethod(nmethod* nm) {
  ZNMethod::register_nmethod(nm);
}

void ZCollectedHeap::unregister_nmethod(nmethod* nm) {
  ZNMethod::unregister_nmethod(nm);
}

void ZCollectedHeap::flush_nmethod(nmethod* nm) {
  ZNMethod::flush_nmethod(nm);
}

void ZCollectedHeap::verify_nmethod(nmethod* nm) {
  // Does nothing
}

WorkGang* ZCollectedHeap::get_safepoint_workers() {
  return _runtime_workers.workers();
}

jlong ZCollectedHeap::millis_since_last_gc() {
  return ZStatCycle::time_since_last() / MILLIUNITS;
}

void ZCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  tc->do_thread(_director);
  tc->do_thread(_driver);
  tc->do_thread(_uncommitter);
  tc->do_thread(_stat);
  _heap.worker_threads_do(tc);
  _runtime_workers.threads_do(tc);
}

VirtualSpaceSummary ZCollectedHeap::create_heap_space_summary() {
  const size_t capacity_in_words = capacity() / HeapWordSize;
  const size_t max_capacity_in_words = max_capacity() / HeapWordSize;
  return VirtualSpaceSummary(reserved_region().start(),
                             reserved_region().start() + capacity_in_words,
                             reserved_region().start() + max_capacity_in_words);
}

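// Called around safepoints: concurrent GC threads registered with the
// SuspendibleThreadSet are asked to yield before the safepoint is reached
// and are resumed again once the safepoint operation has completed.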
void ZCollectedHeap::safepoint_synchronize_begin() {
  SuspendibleThreadSet::synchronize();
}

void ZCollectedHeap::safepoint_synchronize_end() {
  SuspendibleThreadSet::desynchronize();
}

void ZCollectedHeap::prepare_for_verify() {
  // Does nothing
}

void ZCollectedHeap::print_on(outputStream* st) const {
  _heap.print_on(st);
}

void ZCollectedHeap::print_on_error(outputStream* st) const {
  CollectedHeap::print_on_error(st);

  st->print_cr("Address Space");
  st->print_cr( "     Start:             " PTR_FORMAT, ZAddressSpaceStart);
  st->print_cr( "     End:               " PTR_FORMAT, ZAddressSpaceEnd);
  st->print_cr( "     Size:              " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressSpaceSize, ZAddressSpaceSize);
  st->print_cr( "Heap");
  st->print_cr( "     GlobalPhase:       %u", ZGlobalPhase);
  st->print_cr( "     GlobalSeqNum:      %u", ZGlobalSeqNum);
  st->print_cr( "     Offset Max:        " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressOffsetMax, ZAddressOffsetMax);
  st->print_cr( "     Page Size Small:   " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeSmall, ZPageSizeSmall);
  st->print_cr( "     Page Size Medium:  " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeMedium, ZPageSizeMedium);
  st->print_cr( "Metadata Bits");
  st->print_cr( "     Good:              " PTR_FORMAT, ZAddressGoodMask);
  st->print_cr( "     Bad:               " PTR_FORMAT, ZAddressBadMask);
  st->print_cr( "     WeakBad:           " PTR_FORMAT, ZAddressWeakBadMask);
  st->print_cr( "     Marked:            " PTR_FORMAT, ZAddressMetadataMarked);
  st->print_cr( "     Remapped:          " PTR_FORMAT, ZAddressMetadataRemapped);
}

void ZCollectedHeap::print_extended_on(outputStream* st) const {
  _heap.print_extended_on(st);
}

void ZCollectedHeap::print_gc_threads_on(outputStream* st) const {
  _director->print_on(st);
  st->cr();
  _driver->print_on(st);
  st->cr();
  _uncommitter->print_on(st);
  st->cr();
  _stat->print_on(st);
  st->cr();
  _heap.print_worker_threads_on(st);
  _runtime_workers.print_threads_on(st);
}

void ZCollectedHeap::print_tracing_info() const {
  // Does nothing
}

void ZCollectedHeap::verify(VerifyOption option /* ignored */) {
  _heap.verify();
}

bool ZCollectedHeap::is_oop(oop object) const {
  return CollectedHeap::is_oop(object) && _heap.is_oop(object);
}