/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zNMethod.hpp"
#include "gc/z/zServiceability.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "memory/universe.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/mutexLocker.hpp"

class PinAllocating {
private:
  const HeapWord* _mem;

public:
  PinAllocating(HeapWord* mem) :
      _mem(mem) {
    ZHeap::heap()->pin_allocating((uintptr_t)_mem);
  }

  ~PinAllocating() {
    ZHeap::heap()->unpin_allocating((uintptr_t)_mem);
  }
};

class ZObjArrayAllocator : public ObjArrayAllocator {
private:
  const bool   _is_large;
  const size_t _chunk_size;

  void mem_clear_large(HeapWord* mem) const {
    log_info(gc)("ZMemClearer " PTR_FORMAT " " SIZE_FORMAT " M", p2i(mem), _word_size * BytesPerWord / M);
    assert(mem != NULL, "cannot initialize NULL object");
    const size_t hs = oopDesc::header_size();
    assert(_word_size >= hs, "unexpected object size");
    oopDesc::set_klass_gap(mem, 0);

    PinAllocating pin_allocating(mem);

    size_t offset = hs;
    while (offset + _chunk_size < _word_size) {
      Copy::fill_to_aligned_words(mem + offset, _chunk_size);
      offset += _chunk_size;

      {
        ThreadBlockInVM tbivm(JavaThread::current());
      }

      // Fix colors
      mem = (HeapWord*)ZAddress::good((uintptr_t)mem);
    }

    if (offset < _word_size) {
      Copy::fill_to_aligned_words(mem + offset, _word_size - offset);
    }
  }

protected:
  virtual HeapWord* mem_clear(HeapWord* mem) const {
    if (_is_large) {
      mem_clear_large(mem);
    } else {
      mem = ObjArrayAllocator::mem_clear(mem);
    }
    return (HeapWord*)ZAddress::good((uintptr_t)mem);
  }

public:
  ZObjArrayAllocator(Klass* klass, size_t word_size, int length, bool do_zero,
                     Thread* thread = Thread::current()) :
      ObjArrayAllocator(klass, word_size, length, do_zero, thread),
      _is_large(static_cast<size_t>(length) > ZObjectSizeLimitMedium),
      _chunk_size(ZObjectSizeLimitMedium / BytesPerWord) {}
};
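
// Note on the class above (illustrative commentary, not authoritative):
// ZObjArrayAllocator is instantiated from ZCollectedHeap::array_allocate()
// further down in this file, roughly as
//
//   ZObjArrayAllocator allocator(klass, size, length, do_zero, THREAD);
//   oop obj = allocator.allocate();
//
// For arrays classified as large, mem_clear_large() zeroes the payload in
// chunks of _chunk_size words rather than in one go. Between chunks the
// allocating thread passes through a ThreadBlockInVM scope, giving pending
// safepoints (and thereby the concurrent GC) a chance to make progress, after
// which the raw pointer is re-colored with ZAddress::good() since a GC cycle
// may have flipped the good mask in the meantime. PinAllocating presumably
// keeps the partially initialized object from being relocated while clearing
// is still in progress (see ZHeap::pin_allocating()/unpin_allocating()).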

ZCollectedHeap* ZCollectedHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ZCollectedHeap::heap()");
  assert(heap->kind() == CollectedHeap::Z, "Invalid name");
  return (ZCollectedHeap*)heap;
}

ZCollectedHeap::ZCollectedHeap() :
    _soft_ref_policy(),
    _barrier_set(),
    _initialize(&_barrier_set),
    _heap(),
    _director(new ZDirector()),
    _driver(new ZDriver()),
    _uncommitter(new ZUncommitter()),
    _stat(new ZStat()),
    _runtime_workers() {}

CollectedHeap::Name ZCollectedHeap::kind() const {
  return CollectedHeap::Z;
}

const char* ZCollectedHeap::name() const {
  return ZName;
}

jint ZCollectedHeap::initialize() {
  if (!_heap.is_initialized()) {
    return JNI_ENOMEM;
  }

  initialize_reserved_region((HeapWord*)ZAddressReservedStart,
                             (HeapWord*)ZAddressReservedEnd);

  return JNI_OK;
}

void ZCollectedHeap::initialize_serviceability() {
  _heap.serviceability_initialize();
}

void ZCollectedHeap::stop() {
  _director->stop();
  _driver->stop();
  _uncommitter->stop();
  _stat->stop();
}

SoftRefPolicy* ZCollectedHeap::soft_ref_policy() {
  return &_soft_ref_policy;
}

size_t ZCollectedHeap::max_capacity() const {
  return _heap.max_capacity();
}

size_t ZCollectedHeap::capacity() const {
  return _heap.capacity();
}

size_t ZCollectedHeap::used() const {
  return _heap.used();
}

size_t ZCollectedHeap::unused() const {
  return _heap.unused();
}

bool ZCollectedHeap::is_maximal_no_gc() const {
  // Not supported
  ShouldNotReachHere();
  return false;
}

bool ZCollectedHeap::is_in(const void* p) const {
  return _heap.is_in((uintptr_t)p);
}

uint32_t ZCollectedHeap::hash_oop(oop obj) const {
  return _heap.hash_oop(obj);
}

HeapWord* ZCollectedHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
  const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(requested_size));
  const uintptr_t addr = _heap.alloc_tlab(size_in_bytes);

  if (addr != 0) {
    *actual_size = requested_size;
  }

  return (HeapWord*)addr;
}

oop ZCollectedHeap::array_allocate(Klass* klass, int size, int length, bool do_zero, TRAPS) {
  ZObjArrayAllocator allocator(klass, size, length, do_zero, THREAD);
  return allocator.allocate();
}

HeapWord* ZCollectedHeap::mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) {
  const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(size));
  return (HeapWord*)_heap.alloc_object(size_in_bytes);
}

MetaWord* ZCollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                             size_t size,
                                                             Metaspace::MetadataType mdtype) {
  MetaWord* result;

  // Start asynchronous GC
  collect(GCCause::_metadata_GC_threshold);

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Start synchronous GC
  collect(GCCause::_metadata_GC_clear_soft_refs);

  // Retry allocation
  result = loader_data->metaspace_non_null()->allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Out of memory
  return NULL;
}
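
// For reference, the metadata allocation path above escalates in steps: an
// asynchronous GC request plus expand-and-allocate, then a synchronous GC that
// also clears soft references, a plain allocation retry, and a final
// expand-and-allocate before giving up and returning NULL (which the caller
// typically reports as an OutOfMemoryError for Metaspace).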

void ZCollectedHeap::collect(GCCause::Cause cause) {
  _driver->collect(cause);
}

void ZCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  // These collection requests are ignored since ZGC can't run a synchronous
  // GC cycle from within the VM thread. This is considered benign, since the
  // only GC causes coming in here should be heap dumper and heap inspector.
  // However, neither the heap dumper nor the heap inspector really needs a GC
  // to happen, but the result of their heap iterations might in that case be
  // less accurate since they might include objects that would otherwise have
  // been collected by a GC.
  assert(Thread::current()->is_VM_thread(), "Should be the VM thread");
  guarantee(cause == GCCause::_heap_dump ||
            cause == GCCause::_heap_inspection, "Invalid cause");
}

void ZCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  // Not supported
  ShouldNotReachHere();
}

bool ZCollectedHeap::supports_tlab_allocation() const {
  return true;
}

size_t ZCollectedHeap::tlab_capacity(Thread* ignored) const {
  return _heap.tlab_capacity();
}

size_t ZCollectedHeap::tlab_used(Thread* ignored) const {
  return _heap.tlab_used();
}

size_t ZCollectedHeap::max_tlab_size() const {
  return _heap.max_tlab_size();
}

size_t ZCollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  return _heap.unsafe_max_tlab_alloc();
}

bool ZCollectedHeap::can_elide_tlab_store_barriers() const {
  return false;
}

bool ZCollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // Not supported
  ShouldNotReachHere();
  return true;
}

bool ZCollectedHeap::card_mark_must_follow_store() const {
  // Not supported
  ShouldNotReachHere();
  return false;
}

GrowableArray<GCMemoryManager*> ZCollectedHeap::memory_managers() {
  return GrowableArray<GCMemoryManager*>(1, 1, _heap.serviceability_memory_manager());
}

GrowableArray<MemoryPool*> ZCollectedHeap::memory_pools() {
  return GrowableArray<MemoryPool*>(1, 1, _heap.serviceability_memory_pool());
}

void ZCollectedHeap::object_iterate(ObjectClosure* cl) {
  _heap.object_iterate(cl, true /* visit_referents */);
}

void ZCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
  _heap.object_iterate(cl, true /* visit_referents */);
}

HeapWord* ZCollectedHeap::block_start(const void* addr) const {
  return (HeapWord*)_heap.block_start((uintptr_t)addr);
}

bool ZCollectedHeap::block_is_obj(const HeapWord* addr) const {
  return _heap.block_is_obj((uintptr_t)addr);
}

void ZCollectedHeap::register_nmethod(nmethod* nm) {
  ZNMethod::register_nmethod(nm);
}

void ZCollectedHeap::unregister_nmethod(nmethod* nm) {
  ZNMethod::unregister_nmethod(nm);
}

void ZCollectedHeap::flush_nmethod(nmethod* nm) {
  ZNMethod::flush_nmethod(nm);
}

void ZCollectedHeap::verify_nmethod(nmethod* nm) {
  // Does nothing
}

WorkGang* ZCollectedHeap::get_safepoint_workers() {
  return _runtime_workers.workers();
}

jlong ZCollectedHeap::millis_since_last_gc() {
  return ZStatCycle::time_since_last() / MILLIUNITS;
}
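
// The closure below must visit every thread owned by ZGC: the director,
// driver, uncommitter and stat threads, the heap's worker threads, and the
// runtime workers. The same set is printed by print_gc_threads_on() further
// down, so the two should be kept in sync.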
void ZCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  tc->do_thread(_director);
  tc->do_thread(_driver);
  tc->do_thread(_uncommitter);
  tc->do_thread(_stat);
  _heap.worker_threads_do(tc);
  _runtime_workers.threads_do(tc);
}

VirtualSpaceSummary ZCollectedHeap::create_heap_space_summary() {
  const size_t capacity_in_words = capacity() / HeapWordSize;
  const size_t max_capacity_in_words = max_capacity() / HeapWordSize;
  return VirtualSpaceSummary(reserved_region().start(),
                             reserved_region().start() + capacity_in_words,
                             reserved_region().start() + max_capacity_in_words);
}

void ZCollectedHeap::safepoint_synchronize_begin() {
  SuspendibleThreadSet::synchronize();
}

void ZCollectedHeap::safepoint_synchronize_end() {
  SuspendibleThreadSet::desynchronize();
}

void ZCollectedHeap::prepare_for_verify() {
  // Does nothing
}

void ZCollectedHeap::print_on(outputStream* st) const {
  _heap.print_on(st);
}

void ZCollectedHeap::print_on_error(outputStream* st) const {
  CollectedHeap::print_on_error(st);

  st->print_cr("Address Space");
  st->print_cr( " Start:             " PTR_FORMAT, ZAddressSpaceStart);
  st->print_cr( " End:               " PTR_FORMAT, ZAddressSpaceEnd);
  st->print_cr( " Size:              " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressSpaceSize, ZAddressSpaceSize);
  st->print_cr( "Heap");
  st->print_cr( " GlobalPhase:       %u", ZGlobalPhase);
  st->print_cr( " GlobalSeqNum:      %u", ZGlobalSeqNum);
  st->print_cr( " Offset Max:        " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressOffsetMax, ZAddressOffsetMax);
  st->print_cr( " Page Size Small:   " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeSmall, ZPageSizeSmall);
  st->print_cr( " Page Size Medium:  " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeMedium, ZPageSizeMedium);
  st->print_cr( "Metadata Bits");
  st->print_cr( " Good:              " PTR_FORMAT, ZAddressGoodMask);
  st->print_cr( " Bad:               " PTR_FORMAT, ZAddressBadMask);
  st->print_cr( " WeakBad:           " PTR_FORMAT, ZAddressWeakBadMask);
  st->print_cr( " Marked:            " PTR_FORMAT, ZAddressMetadataMarked);
  st->print_cr( " Remapped:          " PTR_FORMAT, ZAddressMetadataRemapped);
}

void ZCollectedHeap::print_extended_on(outputStream* st) const {
  _heap.print_extended_on(st);
}

void ZCollectedHeap::print_gc_threads_on(outputStream* st) const {
  _director->print_on(st);
  st->cr();
  _driver->print_on(st);
  st->cr();
  _uncommitter->print_on(st);
  st->cr();
  _stat->print_on(st);
  st->cr();
  _heap.print_worker_threads_on(st);
  _runtime_workers.print_threads_on(st);
}

void ZCollectedHeap::print_tracing_info() const {
  // Does nothing
}

void ZCollectedHeap::verify(VerifyOption option /* ignored */) {
  _heap.verify();
}

bool ZCollectedHeap::is_oop(oop object) const {
  return CollectedHeap::is_oop(object) && _heap.is_oop(object);
}