/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zNMethod.hpp"
#include "gc/z/zServiceability.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "memory/metaspace/classLoaderMetaspace.hpp"
#include "memory/metaspace/metaspaceEnums.hpp"
#include "memory/universe.hpp"
#include "runtime/mutexLocker.hpp"

ZCollectedHeap* ZCollectedHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ZCollectedHeap::heap()");
  assert(heap->kind() == CollectedHeap::Z, "Invalid kind");
  return (ZCollectedHeap*)heap;
}

ZCollectedHeap::ZCollectedHeap() :
    _soft_ref_policy(),
    _barrier_set(),
    _initialize(&_barrier_set),
    _heap(),
    _director(new ZDirector()),
    _driver(new ZDriver()),
    _uncommitter(new ZUncommitter()),
    _stat(new ZStat()),
    _runtime_workers() {}

CollectedHeap::Name ZCollectedHeap::kind() const {
  return CollectedHeap::Z;
}

const char* ZCollectedHeap::name() const {
  return ZName;
}

jint ZCollectedHeap::initialize() {
  if (!_heap.is_initialized()) {
    return JNI_ENOMEM;
  }

  initialize_reserved_region((HeapWord*)ZAddressReservedStart,
                             (HeapWord*)ZAddressReservedEnd);

  return JNI_OK;
}

void ZCollectedHeap::initialize_serviceability() {
  _heap.serviceability_initialize();
}

void ZCollectedHeap::stop() {
  _director->stop();
  _driver->stop();
  _uncommitter->stop();
  _stat->stop();
}

SoftRefPolicy* ZCollectedHeap::soft_ref_policy() {
  return &_soft_ref_policy;
}

size_t ZCollectedHeap::max_capacity() const {
  return _heap.max_capacity();
}

size_t ZCollectedHeap::capacity() const {
  return _heap.capacity();
}

size_t ZCollectedHeap::used() const {
  return _heap.used();
}

size_t ZCollectedHeap::unused() const {
  return _heap.unused();
}

bool ZCollectedHeap::is_maximal_no_gc() const {
  // Not supported
  ShouldNotReachHere();
  return false;
}

bool ZCollectedHeap::is_in(const void* p) const {
  return _heap.is_in((uintptr_t)p);
}

uint32_t ZCollectedHeap::hash_oop(oop obj) const {
  return _heap.hash_oop(obj);
}

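// Allocates a new TLAB. The requested size (in words) is converted to a
// byte size and satisfied directly by ZHeap. On success, the granted size,
// which here is always exactly the requested size, is reported back through
// actual_size; min_size is not used by this implementation.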
HeapWord* ZCollectedHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
  const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(requested_size));
  const uintptr_t addr = _heap.alloc_tlab(size_in_bytes);

  if (addr != 0) {
    *actual_size = requested_size;
  }

  return (HeapWord*)addr;
}

HeapWord* ZCollectedHeap::mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) {
  const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(size));
  return (HeapWord*)_heap.alloc_object(size_in_bytes);
}

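// Called when a metadata allocation has failed. Escalates in stages: first
// an asynchronous GC plus metaspace expansion, then a synchronous GC that
// also clears soft references, followed by a final allocation and expansion
// attempt, before giving up and returning NULL to signal out of memory.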
MetaWord* ZCollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                             size_t size,
                                                             metaspace::MetadataType mdtype) {
  MetaWord* result;

  // Start asynchronous GC
  collect(GCCause::_metadata_GC_threshold);

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Start synchronous GC
  collect(GCCause::_metadata_GC_clear_soft_refs);

  // Retry allocation
  result = loader_data->metaspace_non_null()->allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Out of memory
  return NULL;
}

void ZCollectedHeap::collect(GCCause::Cause cause) {
  _driver->collect(cause);
}

void ZCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  // These collection requests are ignored since ZGC can't run a synchronous
  // GC cycle from within the VM thread. This is considered benign, since the
  // only GC causes coming in here should be heap dumper and heap inspector.
  // Neither of them actually needs a GC to happen, but without one the result
  // of their heap iterations might be less accurate, since they might include
  // objects that would otherwise have been collected by a GC.
  assert(Thread::current()->is_VM_thread(), "Should be the VM thread");
  guarantee(cause == GCCause::_heap_dump ||
            cause == GCCause::_heap_inspection, "Invalid cause");
}

void ZCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  // Not supported
  ShouldNotReachHere();
}

bool ZCollectedHeap::supports_tlab_allocation() const {
  return true;
}

size_t ZCollectedHeap::tlab_capacity(Thread* ignored) const {
  return _heap.tlab_capacity();
}

size_t ZCollectedHeap::tlab_used(Thread* ignored) const {
  return _heap.tlab_used();
}

size_t ZCollectedHeap::max_tlab_size() const {
  return _heap.max_tlab_size();
}

size_t ZCollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  return _heap.unsafe_max_tlab_alloc();
}

bool ZCollectedHeap::can_elide_tlab_store_barriers() const {
  return false;
}

bool ZCollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // Not supported
  ShouldNotReachHere();
  return true;
}

bool ZCollectedHeap::card_mark_must_follow_store() const {
  // Not supported
  ShouldNotReachHere();
  return false;
}

GrowableArray<GCMemoryManager*> ZCollectedHeap::memory_managers() {
  return GrowableArray<GCMemoryManager*>(1, 1, _heap.serviceability_memory_manager());
}

GrowableArray<MemoryPool*> ZCollectedHeap::memory_pools() {
  return GrowableArray<MemoryPool*>(1, 1, _heap.serviceability_memory_pool());
}

void ZCollectedHeap::object_iterate(ObjectClosure* cl) {
  _heap.object_iterate(cl, true /* visit_weaks */);
}

void ZCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
  _heap.object_iterate(cl, true /* visit_weaks */);
}

HeapWord* ZCollectedHeap::block_start(const void* addr) const {
  return (HeapWord*)_heap.block_start((uintptr_t)addr);
}

bool ZCollectedHeap::block_is_obj(const HeapWord* addr) const {
  return _heap.block_is_obj((uintptr_t)addr);
}

void ZCollectedHeap::register_nmethod(nmethod* nm) {
  ZNMethod::register_nmethod(nm);
}

void ZCollectedHeap::unregister_nmethod(nmethod* nm) {
  ZNMethod::unregister_nmethod(nm);
}

void ZCollectedHeap::flush_nmethod(nmethod* nm) {
  ZNMethod::flush_nmethod(nm);
}

void ZCollectedHeap::verify_nmethod(nmethod* nm) {
  // Does nothing
}

WorkGang* ZCollectedHeap::get_safepoint_workers() {
  return _runtime_workers.workers();
}

jlong ZCollectedHeap::millis_since_last_gc() {
  return ZStatCycle::time_since_last() / MILLIUNITS;
}

void ZCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  tc->do_thread(_director);
  tc->do_thread(_driver);
  tc->do_thread(_uncommitter);
  tc->do_thread(_stat);
  _heap.worker_threads_do(tc);
  _runtime_workers.threads_do(tc);
}

VirtualSpaceSummary ZCollectedHeap::create_heap_space_summary() {
  const size_t capacity_in_words = capacity() / HeapWordSize;
  const size_t max_capacity_in_words = max_capacity() / HeapWordSize;
  return VirtualSpaceSummary(reserved_region().start(),
                             reserved_region().start() + capacity_in_words,
                             reserved_region().start() + max_capacity_in_words);
}

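// Brackets a safepoint operation: synchronize() blocks the threads
// registered in the suspendible thread set before the safepoint begins,
// and desynchronize() releases them again once it ends, so concurrent GC
// work pauses while Java threads are stopped.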
void ZCollectedHeap::safepoint_synchronize_begin() {
  SuspendibleThreadSet::synchronize();
}

void ZCollectedHeap::safepoint_synchronize_end() {
  SuspendibleThreadSet::desynchronize();
}

void ZCollectedHeap::prepare_for_verify() {
  // Does nothing
}

void ZCollectedHeap::print_on(outputStream* st) const {
  _heap.print_on(st);
}

void ZCollectedHeap::print_on_error(outputStream* st) const {
  CollectedHeap::print_on_error(st);

  st->print_cr("Address Space");
  st->print_cr( " Start:             " PTR_FORMAT, ZAddressSpaceStart);
  st->print_cr( " End:               " PTR_FORMAT, ZAddressSpaceEnd);
  st->print_cr( " Size:              " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressSpaceSize, ZAddressSpaceSize);
  st->print_cr( "Heap");
  st->print_cr( " GlobalPhase:       %u", ZGlobalPhase);
  st->print_cr( " GlobalSeqNum:      %u", ZGlobalSeqNum);
  st->print_cr( " Offset Max:        " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressOffsetMax, ZAddressOffsetMax);
  st->print_cr( " Page Size Small:   " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeSmall, ZPageSizeSmall);
  st->print_cr( " Page Size Medium:  " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeMedium, ZPageSizeMedium);
  st->print_cr( "Metadata Bits");
  st->print_cr( " Good:              " PTR_FORMAT, ZAddressGoodMask);
  st->print_cr( " Bad:               " PTR_FORMAT, ZAddressBadMask);
  st->print_cr( " WeakBad:           " PTR_FORMAT, ZAddressWeakBadMask);
  st->print_cr( " Marked:            " PTR_FORMAT, ZAddressMetadataMarked);
  st->print_cr( " Remapped:          " PTR_FORMAT, ZAddressMetadataRemapped);
}

void ZCollectedHeap::print_extended_on(outputStream* st) const {
  _heap.print_extended_on(st);
}

void ZCollectedHeap::print_gc_threads_on(outputStream* st) const {
  _director->print_on(st);
  st->cr();
  _driver->print_on(st);
  st->cr();
  _uncommitter->print_on(st);
  st->cr();
  _stat->print_on(st);
  st->cr();
  _heap.print_worker_threads_on(st);
  _runtime_workers.print_threads_on(st);
}

void ZCollectedHeap::print_tracing_info() const {
  // Does nothing
}

void ZCollectedHeap::verify(VerifyOption option /* ignored */) {
  _heap.verify();
}

bool ZCollectedHeap::is_oop(oop object) const {
  return CollectedHeap::is_oop(object) && _heap.is_oop(object);
}