/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zNMethodTable.hpp"
#include "gc/z/zServiceability.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "runtime/mutexLocker.hpp"

ZCollectedHeap* ZCollectedHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ZCollectedHeap::heap()");
  assert(heap->kind() == CollectedHeap::Z, "Invalid name");
  return (ZCollectedHeap*)heap;
}

ZCollectedHeap::ZCollectedHeap(ZCollectorPolicy* policy) :
    _collector_policy(policy),
    _soft_ref_policy(),
    _barrier_set(),
    _initialize(&_barrier_set),
    _heap(),
    _director(new ZDirector()),
    _driver(new ZDriver()),
    _stat(new ZStat()),
    _runtime_workers() {}

CollectedHeap::Name ZCollectedHeap::kind() const {
  return CollectedHeap::Z;
}

const char* ZCollectedHeap::name() const {
  return ZGCName;
}

jint ZCollectedHeap::initialize() {
  if (!_heap.is_initialized()) {
    return JNI_ENOMEM;
  }

  initialize_reserved_region((HeapWord*)ZAddressReservedStart(),
                             (HeapWord*)ZAddressReservedEnd());

  // ZGC fixes up nmethods lazily, so the assumption that nmethods will
  // look good after GC safepoints does not hold.
  Universe::disable_verify_subset(Universe::Verify_CodeCacheOops);

  return JNI_OK;
}
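// Serviceability support (the GCMemoryManager and MemoryPool exposed through the
// management interfaces) is delegated to the ZHeap/ZServiceability layer; see
// serviceability_initialize() below and the memory_managers()/memory_pools()
// accessors further down.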
void ZCollectedHeap::initialize_serviceability() {
  _heap.serviceability_initialize();
}

void ZCollectedHeap::stop() {
  _director->stop();
  _driver->stop();
  _stat->stop();
}

CollectorPolicy* ZCollectedHeap::collector_policy() const {
  return _collector_policy;
}

SoftRefPolicy* ZCollectedHeap::soft_ref_policy() {
  return &_soft_ref_policy;
}

size_t ZCollectedHeap::max_capacity() const {
  return _heap.max_capacity();
}

size_t ZCollectedHeap::capacity() const {
  return _heap.capacity();
}

size_t ZCollectedHeap::used() const {
  return _heap.used();
}

bool ZCollectedHeap::is_maximal_no_gc() const {
  // Not supported
  ShouldNotReachHere();
  return false;
}

bool ZCollectedHeap::is_scavengable(oop obj) {
  return false;
}

bool ZCollectedHeap::is_in(const void* p) const {
  return is_in_reserved(p) && _heap.is_in((uintptr_t)p);
}

bool ZCollectedHeap::is_in_closed_subset(const void* p) const {
  return is_in(p);
}

void ZCollectedHeap::fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap) {
  // Does nothing, not a parsable heap
}

HeapWord* ZCollectedHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
  const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(requested_size));
  const uintptr_t addr = _heap.alloc_tlab(size_in_bytes);

  if (addr != 0) {
    *actual_size = requested_size;
  }

  return (HeapWord*)addr;
}

HeapWord* ZCollectedHeap::mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) {
  const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(size));
  return (HeapWord*)_heap.alloc_object(size_in_bytes);
}

MetaWord* ZCollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                             size_t size,
                                                             Metaspace::MetadataType mdtype) {
  MetaWord* result;

  // Start asynchronous GC
  collect(GCCause::_metadata_GC_threshold);

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Start synchronous GC
  collect(GCCause::_metadata_GC_clear_soft_refs);

  // Retry allocation
  result = loader_data->metaspace_non_null()->allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Out of memory
  return NULL;
}

void ZCollectedHeap::collect(GCCause::Cause cause) {
  _driver->collect(cause);
}

void ZCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  // These collection requests are ignored since ZGC can't run a synchronous
  // GC cycle from within the VM thread. This is considered benign, since the
  // only GC causes coming in here should be heap dumper and heap inspector.
  // However, neither the heap dumper nor the heap inspector really need a GC
  // to happen, but the result of their heap iterations might in that case be
  // less accurate since they might include objects that would otherwise have
  // been collected by a GC.
  assert(Thread::current()->is_VM_thread(), "Should be the VM thread");
  guarantee(cause == GCCause::_heap_dump ||
            cause == GCCause::_heap_inspection, "Invalid cause");
}
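// Explicit and induced GC requests come in through collect() above and are
// forwarded to the ZDriver, which runs the cycle concurrently. ZGC has no
// stop-the-world full collection, which is why do_full_collection() below
// must never be reached.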
void ZCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  // Not supported
  ShouldNotReachHere();
}

bool ZCollectedHeap::supports_tlab_allocation() const {
  return true;
}

size_t ZCollectedHeap::tlab_capacity(Thread* ignored) const {
  return _heap.tlab_capacity();
}

size_t ZCollectedHeap::tlab_used(Thread* ignored) const {
  return _heap.tlab_used();
}

size_t ZCollectedHeap::max_tlab_size() const {
  return _heap.max_tlab_size();
}

size_t ZCollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  return _heap.unsafe_max_tlab_alloc();
}

bool ZCollectedHeap::can_elide_tlab_store_barriers() const {
  return false;
}

bool ZCollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // Not supported
  ShouldNotReachHere();
  return true;
}

bool ZCollectedHeap::card_mark_must_follow_store() const {
  // Not supported
  ShouldNotReachHere();
  return false;
}

GrowableArray<GCMemoryManager*> ZCollectedHeap::memory_managers() {
  return GrowableArray<GCMemoryManager*>(1, 1, _heap.serviceability_memory_manager());
}

GrowableArray<MemoryPool*> ZCollectedHeap::memory_pools() {
  return GrowableArray<MemoryPool*>(1, 1, _heap.serviceability_memory_pool());
}

void ZCollectedHeap::object_iterate(ObjectClosure* cl) {
  _heap.object_iterate(cl, true /* visit_referents */);
}

void ZCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
  _heap.object_iterate(cl, true /* visit_referents */);
}

HeapWord* ZCollectedHeap::block_start(const void* addr) const {
  return (HeapWord*)_heap.block_start((uintptr_t)addr);
}

size_t ZCollectedHeap::block_size(const HeapWord* addr) const {
  size_t size_in_bytes = _heap.block_size((uintptr_t)addr);
  return ZUtils::bytes_to_words(size_in_bytes);
}

bool ZCollectedHeap::block_is_obj(const HeapWord* addr) const {
  return _heap.block_is_obj((uintptr_t)addr);
}

void ZCollectedHeap::register_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  ZNMethodTable::register_nmethod(nm);
}

void ZCollectedHeap::unregister_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  ZNMethodTable::unregister_nmethod(nm);
}

void ZCollectedHeap::verify_nmethod(nmethod* nm) {
  // Does nothing
}

WorkGang* ZCollectedHeap::get_safepoint_workers() {
  return _runtime_workers.workers();
}

jlong ZCollectedHeap::millis_since_last_gc() {
  return ZStatCycle::time_since_last() / MILLIUNITS;
}

void ZCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  tc->do_thread(_director);
  tc->do_thread(_driver);
  tc->do_thread(_stat);
  _heap.worker_threads_do(tc);
  _runtime_workers.threads_do(tc);
}

VirtualSpaceSummary ZCollectedHeap::create_heap_space_summary() {
  const size_t capacity_in_words = capacity() / HeapWordSize;
  const size_t max_capacity_in_words = max_capacity() / HeapWordSize;
  return VirtualSpaceSummary(reserved_region().start(),
                             reserved_region().start() + capacity_in_words,
                             reserved_region().start() + max_capacity_in_words);
}
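// Safepoint synchronization: the two hooks below pause and resume GC threads
// that are currently operating inside the SuspendibleThreadSet, so that they
// do not mutate the heap while a VM safepoint operation is in progress.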
void ZCollectedHeap::safepoint_synchronize_begin() {
  SuspendibleThreadSet::synchronize();
}

void ZCollectedHeap::safepoint_synchronize_end() {
  SuspendibleThreadSet::desynchronize();
}

void ZCollectedHeap::prepare_for_verify() {
  // Does nothing
}

void ZCollectedHeap::print_on(outputStream* st) const {
  _heap.print_on(st);
}

void ZCollectedHeap::print_on_error(outputStream* st) const {
  CollectedHeap::print_on_error(st);

  st->print_cr("Address Space");
  st->print_cr( "     Start:             " PTR_FORMAT, ZAddressSpaceStart);
  st->print_cr( "     End:               " PTR_FORMAT, ZAddressSpaceEnd);
  st->print_cr( "     Size:              " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressSpaceSize, ZAddressSpaceSize);
  st->print_cr( "Heap");
  st->print_cr( "     GlobalPhase:       %u", ZGlobalPhase);
  st->print_cr( "     GlobalSeqNum:      %u", ZGlobalSeqNum);
  st->print_cr( "     Offset Max:        " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressOffsetMax, ZAddressOffsetMax);
  st->print_cr( "     Page Size Small:   " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeSmall, ZPageSizeSmall);
  st->print_cr( "     Page Size Medium:  " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeMedium, ZPageSizeMedium);
  st->print_cr( "Metadata Bits");
  st->print_cr( "     Good:              " PTR_FORMAT, ZAddressGoodMask);
  st->print_cr( "     Bad:               " PTR_FORMAT, ZAddressBadMask);
  st->print_cr( "     WeakBad:           " PTR_FORMAT, ZAddressWeakBadMask);
  st->print_cr( "     Marked:            " PTR_FORMAT, ZAddressMetadataMarked);
  st->print_cr( "     Remapped:          " PTR_FORMAT, ZAddressMetadataRemapped);
}

void ZCollectedHeap::print_extended_on(outputStream* st) const {
  _heap.print_extended_on(st);
}

void ZCollectedHeap::print_gc_threads_on(outputStream* st) const {
  _director->print_on(st);
  st->cr();
  _driver->print_on(st);
  st->cr();
  _stat->print_on(st);
  st->cr();
  _heap.print_worker_threads_on(st);
  _runtime_workers.print_threads_on(st);
}

void ZCollectedHeap::print_tracing_info() const {
  // Does nothing
}

void ZCollectedHeap::verify(VerifyOption option /* ignored */) {
  _heap.verify();
}

bool ZCollectedHeap::is_oop(oop object) const {
  return CollectedHeap::is_oop(object) && _heap.is_oop(object);
}