/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/adjoiningGenerations.hpp"
#include "gc/parallel/adjoiningVirtualSpaces.hpp"
#include "gc/parallel/cardTableExtension.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/generationSizer.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweep.hpp"
#include "gc/parallel/psMemoryPool.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/vmPSOperations.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcWhen.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryManager.hpp"
#include "services/memTracker.hpp"
#include "utilities/vmError.hpp"

PSYoungGen*                 ParallelScavengeHeap::_young_gen = NULL;
PSOldGen*                   ParallelScavengeHeap::_old_gen = NULL;
PSAdaptiveSizePolicy*       ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
GCTaskManager*              ParallelScavengeHeap::_gc_task_manager = NULL;

jint ParallelScavengeHeap::initialize() {
  CollectedHeap::pre_initialize();

  const size_t heap_size = _collector_policy->max_heap_byte_size();

  ReservedSpace heap_rs = Universe::reserve_heap(heap_size, _collector_policy->heap_alignment());

  os::trace_page_sizes("Heap",
                       _collector_policy->min_heap_byte_size(),
                       heap_size,
                       generation_alignment(),
                       heap_rs.base(),
                       heap_rs.size());

  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));

  CardTableExtension* const barrier_set = new CardTableExtension(reserved_region());
  barrier_set->initialize();
  set_barrier_set(barrier_set);
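
  // Illustrative aside, not part of the original file: the CardTableExtension
  // created above is a card-marking barrier set covering the whole reserved
  // region. The disabled sketch below, using simplified stand-in names, shows
  // the basic card-table idea: one byte of the table covers one 512-byte card
  // of heap, so a store to `addr` dirties table[addr >> 9].
#if 0
  static jbyte* card_for(jbyte* byte_map_base, const void* addr) {
    const int card_shift = 9;  // 512-byte cards, as in HotSpot's card table.
    return byte_map_base + (uintptr_t(addr) >> card_shift);
  }
#endif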
  // Make up the generations.
  //
  // Calculate the maximum size that a generation can grow. This includes
  // growth into the other generation. Note that _max_gen_size is kept as
  // the maximum size of the generation as the boundaries currently stand,
  // and is still used as that value.
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  _gens = new AdjoiningGenerations(heap_rs, _collector_policy, generation_alignment());

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             _collector_policy->gen_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio);

  assert(!UseAdaptiveGCBoundary ||
         (old_gen()->virtual_space()->high_boundary() ==
          young_gen()->virtual_space()->low_boundary()),
         "Boundaries must meet");

  // Initialize the policy counters - 2 collectors, 3 generations.
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);

  // Set up the GCTaskManager.
  _gc_task_manager = GCTaskManager::create(ParallelGCThreads);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}
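
// Worked example of the sizing inputs above (illustrative numbers only):
// with -XX:MaxGCPauseMillis=200 and -XX:MaxGCMinorPauseMillis=50 the policy
// goals become 0.2 s and 0.05 s, and if eden starts at 64 MB while the old
// gen starts at 48 MB, initial_promo_size is MIN2(64M, 48M) = 48 MB.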
void ParallelScavengeHeap::post_initialize() {
  // Need to init the tenuring threshold.
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweep::initialize();
  }
  PSPromotionManager::initialize();
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}

size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  return young_gen()->is_in(p) || old_gen()->is_in(p);
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  return young_gen()->is_in_reserved(p) || old_gen()->is_in_reserved(p);
}

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.
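
// The two entry points implementing these policies in this file:
//   mem_allocate()        - the basic policy; runs in the requesting thread.
//   failed_mem_allocate() - the failed policy; runs in the VM thread at a
//                           safepoint, reached via VM_ParallelGCFailedAllocation.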
// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(size_t size,
                                             bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false, so set it
  // here and reset it to true only if the gc time limit is being exceeded,
  // as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size);

  uint loop_count = 0;
  uint gc_count = 0;
  uint gclocker_stalled_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the Heap_lock. VM
    // operations also hold the Heap_lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = total_collections();

      result = young_gen()->allocate(size);
      if (result != NULL) {
        return result;
      }

      // If certain conditions hold, try allocating from the old gen.
      result = mem_allocate_old_gen(size);
      if (result != NULL) {
        return result;
      }

      if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
        return NULL;
      }

      // Failed to allocate without a gc.
      if (GCLocker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {
      // Generate a VM operation.
      VM_ParallelGCFailedAllocation op(size, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that
      // cannot be satisfied.
      if (op.prologue_succeeded()) {
        assert(is_in_or_null(op.result()), "result not in heap");

        // If GC was locked out during the VM operation, then retry the
        // allocation and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // Retry and/or stall as necessary.
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is NULL) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation). Exit the loop so that an out-of-memory
        // error will be thrown (return NULL, ignoring the contents of
        // op.result()), but clear gc_overhead_limit_exceeded so that the
        // next collection starts with a clean slate (i.e., forgets about
        // previous overhead excesses). Fill op.result() with a filler object
        // so that the heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = collector_policy()->all_soft_refs_clear();

        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          log_trace(gc)("ParallelScavengeHeap::mem_allocate: return NULL because gc_overhead_limit_exceeded is set");
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc)("ParallelScavengeHeap::mem_allocate retries %d times", loop_count);
      log_warning(gc)("\tsize=" SIZE_FORMAT, size);
    }
  }

  return result;
}
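
// Disabled sketch, not part of the original file: the essence of the
// duplicate-collection guard used above. The requesting thread records
// total_collections() while holding the Heap_lock and hands that count to
// the VM operation; in the VM thread, a prologue roughly like the stand-in
// below (the real check lives in the VM_GC_Operation machinery) skips the
// collection when another request already triggered one in the interim.
#if 0
static bool gc_request_still_current(uint gc_count_at_request) {
  // A changed count means some other thread's collection already ran, so
  // this request should retry its allocation instead of collecting again.
  return gc_count_at_request == ParallelScavengeHeap::heap()->total_collections();
}
#endif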
// A "death march" is a series of ultra-slow allocations in which a full gc is
// done before each allocation, and after the full gc the allocation still
// cannot be satisfied from the young gen. This routine detects that condition;
// it should be called after a full gc has been done and the allocation
// attempted from the young gen. The parameter 'addr' should be the result of
// that young gen allocation attempt.
void
ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
  if (addr != NULL) {
    _death_march_count = 0;  // Death march has ended.
  } else if (_death_march_count == 0) {
    if (should_alloc_in_eden(size)) {
      _death_march_count = 1;  // Death march has started.
    }
  }
}

HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
  if (!should_alloc_in_eden(size) || GCLocker::is_active_and_needs_gc()) {
    // Size is too big for eden, or gc is locked out.
    return old_gen()->allocate(size);
  }

  // If a "death march" is in progress, allocate from the old gen a limited
  // number of times before doing a GC.
  if (_death_march_count > 0) {
    if (_death_march_count < 64) {
      ++_death_march_count;
      return old_gen()->allocate(size);
    } else {
      _death_march_count = 0;
    }
  }
  return NULL;
}
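
// Disabled sketch, not part of the original file: the death-march throttle
// above modelled in isolation. Once a post-full-GC young-gen allocation has
// failed, at most 63 further eden-sized requests are satisfied from the old
// gen before the counter resets and the caller falls back into the GC path.
#if 0
struct DeathMarchThrottle {                // Hypothetical stand-in type.
  uint _count;
  DeathMarchThrottle() : _count(0) {}
  bool may_use_old_gen() {                 // Mirrors mem_allocate_old_gen().
    if (_count > 0) {
      if (_count < 64) { ++_count; return true; }
      _count = 0;                          // Limit reached: force a GC.
    }
    return false;
  }
};
#endif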
void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  if (UseParallelOldGC) {
    // The do_full_collection() parameter clear_all_soft_refs
    // is interpreted here as maximum_compaction which will
    // cause SoftRefs to be cleared.
    bool maximum_compaction = clear_all_soft_refs;
    PSParallelCompact::invoke(maximum_compaction);
  } else {
    PSMarkSweep::invoke(clear_all_soft_refs);
  }
}

// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method implements allocation policy,
// NOT collection policy. So we do not check here whether the gc time limit
// has been exceeded; that is the responsibility of the heap specific
// collection methods. This method decides where to attempt allocations and
// when to attempt collections, but contains no collection specific policy.
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // We assume that allocation in eden will fail unless we collect.

  // First level allocation failure: scavenge, then allocate in the young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  const bool invoked_full_gc = PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size);

  // Second level allocation failure:
  // mark sweep, then allocate in the young generation.
  if (result == NULL && !invoked_full_gc) {
    do_full_collection(false);
    result = young_gen()->allocate(size);
  }

  death_march_check(result, size);

  // Third level allocation failure:
  // after mark sweep and young generation allocation failure,
  // allocate in the old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  // Fourth level allocation failure. We're running out of memory:
  // a more complete mark sweep, then allocate in the young generation.
  if (result == NULL) {
    do_full_collection(true);
    result = young_gen()->allocate(size);
  }

  // Fifth level allocation failure:
  // after the more complete mark sweep, allocate in the old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  return result;
}
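
// Quick-reference summary of the fallback ladder above:
//   1. scavenge (may escalate to a full gc), then young-gen allocation;
//   2. full gc without clearing soft refs (skipped if the scavenge already
//      escalated), then young-gen allocation;
//   3. old-gen allocation;
//   4. full gc clearing all soft refs (maximum compaction), then young-gen
//      allocation;
//   5. old-gen allocation; NULL after this means a genuine out-of-memory.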
void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::tlab_used(Thread* thr) const {
  return young_gen()->eden_space()->tlab_used(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
  return young_gen()->allocate(size);
}

void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
  CollectedHeap::accumulate_statistics_all_tlabs();
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

bool ParallelScavengeHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // We don't need barriers for stores to objects in the
  // young gen and, a fortiori, for initializing stores to
  // objects therein.
  return is_in_young(new_obj);
}

// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
         "this thread should not own the Heap_lock");

  uint gc_count = 0;
  uint full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // These values are guarded by the Heap_lock.
    gc_count = total_collections();
    full_gc_count = total_full_collections();
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}
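
// Background for block_start() and block_is_obj() below: the old gen keeps
// an ObjectStartArray (see the start_array() call), a side table recording
// where objects begin within each covered block of the old gen, which makes
// it possible to map an arbitrary interior address back to an object start.
// The young gen has no such table, so the young-gen branch is only reachable
// from debugging and error-reporting paths.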
HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // Called from os::print_location by find or VMError.
    if (Debugging || VMError::fatal_error_in_progress()) return NULL;
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return NULL;
}

size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweep::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // No need to retire TLABs for verification.
}

PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
  PSOldGen* old = old_gen();
  HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
  VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end());
  SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes());

  PSYoungGen* young = young_gen();
  VirtualSpaceSummary young_summary(young->reserved().start(),
    (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());

  MutableSpace* eden = young_gen()->eden_space();
  SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());

  MutableSpace* from = young_gen()->from_space();
  SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());

  MutableSpace* to = young_gen()->to_space();
  SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());

  VirtualSpaceSummary heap_summary = create_heap_space_summary();
  return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
}

void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  MetaspaceAux::print_on(st);
}

void ParallelScavengeHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

  if (UseParallelOldGC) {
    st->cr();
    PSParallelCompact::print_on_error(st);
  }
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  PSScavenge::gc_task_manager()->threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  AdaptiveSizePolicyOutput::print();
  log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs",
                            PSScavenge::accumulated_time()->seconds());
  log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs",
                            UseParallelOldGC ? PSParallelCompact::accumulated_time()->seconds()
                                             : PSMarkSweep::accumulated_time()->seconds());
}

void ParallelScavengeHeap::verify(VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    log_debug(gc, verify)("Tenured");
    old_gen()->verify();

    log_debug(gc, verify)("Eden");
    young_gen()->verify();
  }
}

void ParallelScavengeHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const PSHeapSummary& heap_summary = create_ps_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Not a ParallelScavengeHeap");
  return (ParallelScavengeHeap*)heap;
}

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
                                            size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}
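
// Note on the UseAdaptiveGCBoundary checks above: under ParallelScavengeHeap
// the young and old generations occupy adjoining virtual spaces, and the
// boundary between them can be moved so that one generation absorbs committed
// space from the other (see AdjoiningGenerations). When the size policy has
// already absorbed eden space into the old gen during this cycle, the resize
// request is treated as satisfied and dropped.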
ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
  // Nothing particular.
}

ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
  // Nothing particular.
}

#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
  }
}
#endif

bool ParallelScavengeHeap::is_scavengable(oop obj) {
  return is_in_young(obj);
}

void ParallelScavengeHeap::register_nmethod(nmethod* nm) {
  CodeCache::register_scavenge_root_nmethod(nm);
}

void ParallelScavengeHeap::verify_nmethod(nmethod* nm) {
  CodeCache::verify_scavenge_root_nmethod(nm);
}

class PSScavengeMemoryManager : public GCMemoryManager {
public:
  PSScavengeMemoryManager() : GCMemoryManager() {}

  const char* name() { return "PS Scavenge"; }
};

class PSMarkSweepMemoryManager : public GCMemoryManager {
public:
  PSMarkSweepMemoryManager() : GCMemoryManager() {}

  const char* name() { return "PS MarkSweep"; }
};

GrowableArray<GCMemoryManager*> ParallelScavengeHeap::memory_managers() {
  _minor_mgr = new PSScavengeMemoryManager();
  _major_mgr = new PSMarkSweepMemoryManager();
  GrowableArray<GCMemoryManager*> mem_mgrs;
  mem_mgrs.append(_minor_mgr);
  mem_mgrs.append(_major_mgr);
  return mem_mgrs;
}

GrowableArray<MemoryPool*> ParallelScavengeHeap::memory_pools() {
  PSYoungGen* young = young_gen();
  EdenMutableSpacePool* eden = new EdenMutableSpacePool(young,
                                                        young->eden_space(),
                                                        "PS Eden Space",
                                                        MemoryPool::Heap,
                                                        false /* support_usage_threshold */);

  SurvivorMutableSpacePool* survivor = new SurvivorMutableSpacePool(young,
                                                                    "PS Survivor Space",
                                                                    MemoryPool::Heap,
                                                                    false /* support_usage_threshold */);

  PSGenerationPool* old_gen_pool = new PSGenerationPool(old_gen(),
                                                        "PS Old Gen",
                                                        MemoryPool::Heap,
                                                        true /* support_usage_threshold */);

  // The major (full-collection) manager covers all three pools; the minor
  // (scavenge) manager covers only the young-gen pools.
  _major_mgr->add_pool(eden);
  _major_mgr->add_pool(survivor);
  _major_mgr->add_pool(old_gen_pool);

  _minor_mgr->add_pool(eden);
  _minor_mgr->add_pool(survivor);

  GrowableArray<MemoryPool*> mem_pools;
  mem_pools.append(eden);
  mem_pools.append(survivor);
  mem_pools.append(old_gen_pool);
  return mem_pools;
}
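
// Note: the manager and pool names above ("PS Scavenge", "PS MarkSweep",
// "PS Eden Space", "PS Survivor Space", "PS Old Gen") are the strings this
// collector exposes through the java.lang.management GarbageCollectorMXBean
// and MemoryPoolMXBean APIs.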