# HG changeset patch
# User pliden
# Date 1428674861 -7200
#      Fri Apr 10 16:07:41 2015 +0200
# Node ID dd7763176efa977a2befd0f99d3283f40a10f75c
# Parent  123c1ff593e667d3eb0540358bfb58bb2ab29c32
imported patch parallelscavenge_cleanup

diff --git a/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.cpp b/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.cpp
@@ -89,7 +89,7 @@
   assert(virtual_space()->is_aligned(gen_size_limit()), "not aligned");
   assert(gen_size_limit() >= virtual_space()->committed_size(), "bad gen size");

-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   size_t result = gen_size_limit() - virtual_space()->committed_size();
   size_t result_aligned = align_size_down(result, heap->generation_alignment());
   return result_aligned;
@@ -101,7 +101,7 @@
     return uncommitted_bytes;
   }

-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   const size_t gen_alignment = heap->generation_alignment();
   PSAdaptiveSizePolicy* policy = heap->size_policy();
   const size_t working_size =
diff --git a/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp b/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp
@@ -73,7 +73,7 @@
   size_t current_committed_size = virtual_space()->committed_size();
   assert((gen_size_limit() >= current_committed_size),
     "generation size limit is wrong");
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   size_t result = gen_size_limit() - current_committed_size;
   size_t result_aligned = align_size_down(result, heap->generation_alignment());
   return result_aligned;
@@ -91,7 +91,7 @@
   if (eden_space()->is_empty()) {
     // Respect the minimum size for eden and for the young gen as a whole.
-    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
     const size_t eden_alignment = heap->space_alignment();
     const size_t gen_alignment = heap->generation_alignment();

@@ -128,7 +128,7 @@
 // If to_space is below from_space, to_space is not considered.
 // to_space can be.
 size_t ASPSYoungGen::available_to_live() {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   const size_t alignment = heap->space_alignment();

   // Include any space that is committed but is not in eden.
@@ -292,7 +292,7 @@
   assert(eden_start < from_start, "Cannot push into from_space");

-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   const size_t alignment = heap->space_alignment();
   const bool maintain_minimum =
     (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();
@@ -345,8 +345,6 @@
     // Does the optimal to-space overlap from-space?
     if (to_start < (char*)from_space()->end()) {
-      assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
       // Calculate the minimum offset possible for from_end
       size_t from_size =
         pointer_delta(from_space()->top(), from_start, sizeof(char));
@@ -509,9 +507,7 @@
   assert(from_space()->top() == old_from_top, "from top changed!");

   if (PrintAdaptiveSizePolicy) {
-    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
     gclog_or_tty->print("AdaptiveSizePolicy::survivor space sizes: "
                   "collection: %d "
                   "(" SIZE_FORMAT ", " SIZE_FORMAT ") -> "
@@ -542,7 +538,7 @@
   }
   MemRegion cmr((HeapWord*)virtual_space()->low(),
                 (HeapWord*)virtual_space()->high());
-  Universe::heap()->barrier_set()->resize_covered_region(cmr);
+  ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(cmr);

   space_invariants();
 }
diff --git a/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp b/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp
@@ -76,9 +76,7 @@
  public:
   CheckForUnmarkedObjects() {
-    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
     _young_gen = heap->young_gen();
     _card_table = barrier_set_cast<CardTableExtension>(heap->barrier_set());
     // No point in asserting barrier set type here. Need to make CardTableExtension
@@ -325,9 +323,7 @@
 void CardTableExtension::verify_all_young_refs_imprecise() {
   CheckForUnmarkedObjects check;

-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSOldGen* old_gen = heap->old_gen();

   old_gen->object_iterate(&check);
@@ -335,9 +331,7 @@
 // This should be called immediately after a scavenge, before mutators resume.
 void CardTableExtension::verify_all_young_refs_precise() {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSOldGen* old_gen = heap->old_gen();

   CheckForPreciseMarks check(
@@ -351,7 +345,7 @@

 void CardTableExtension::verify_all_young_refs_precise_helper(MemRegion mr) {
   CardTableExtension* card_table =
-    barrier_set_cast<CardTableExtension>(Universe::heap()->barrier_set());
+    barrier_set_cast<CardTableExtension>(ParallelScavengeHeap::heap()->barrier_set());

   jbyte* bot = card_table->byte_for(mr.start());
   jbyte* top = card_table->byte_for(mr.end());
@@ -523,7 +517,7 @@
     cur_committed = new_committed;
   }
 #ifdef ASSERT
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   assert(cur_committed.start() ==
          (HeapWord*) align_size_up((uintptr_t) cur_committed.start(),
                                    os::vm_page_size()),
diff --git a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
@@ -89,6 +89,7 @@
   double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
   double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

+  _psh = this;
   _gens = new AdjoiningGenerations(heap_rs, _collector_policy, generation_alignment());

   _old_gen = _gens->old_gen();
@@ -114,7 +115,6 @@
   // initialize the policy counters - 2 collectors, 3 generations
   _gc_policy_counters =
     new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);
-  _psh = this;

   // Set up the GCTaskManager
   _gc_task_manager = GCTaskManager::create(ParallelGCThreads);
@@ -259,7 +259,7 @@
     // total_collections() value!
     {
       MutexLocker ml(Heap_lock);
-      gc_count = Universe::heap()->total_collections();
+      gc_count = total_collections();

       result = young_gen()->allocate(size);
       if (result != NULL) {
@@ -309,8 +309,7 @@
     // This prevents us from looping until time out on requests that can
     // not be satisfied.
     if (op.prologue_succeeded()) {
-      assert(Universe::heap()->is_in_or_null(op.result()),
-        "result not in heap");
+      assert(is_in_or_null(op.result()), "result not in heap");

       // If GC was locked out during VM operation then retry allocation
       // and/or stall as necessary.
@@ -420,7 +419,7 @@
 HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
-  assert(!Universe::heap()->is_gc_active(), "not reentrant");
+  assert(!is_gc_active(), "not reentrant");
   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

   // We assume that allocation in eden will fail unless we collect.
@@ -508,8 +507,8 @@
   {
     MutexLocker ml(Heap_lock);
     // This value is guarded by the Heap_lock
-    gc_count = Universe::heap()->total_collections();
-    full_gc_count = Universe::heap()->total_full_collections();
+    gc_count = total_collections();
+    full_gc_count = total_full_collections();
   }

   VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
diff --git a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp
@@ -48,7 +48,7 @@
 //
 void ThreadRootsMarkingTask::do_it(GCTaskManager* manager, uint which) {
-  assert(Universe::heap()->is_gc_active(), "called outside gc");
+  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

   ResourceMark rm;
@@ -79,7 +79,7 @@

 void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
-  assert(Universe::heap()->is_gc_active(), "called outside gc");
+  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

   NOT_PRODUCT(GCTraceTime tm("MarkFromRootsTask",
     PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
@@ -150,7 +150,7 @@

 void RefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
 {
-  assert(Universe::heap()->is_gc_active(), "called outside gc");
+  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

   NOT_PRODUCT(GCTraceTime tm("RefProcTask",
     PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
@@ -168,7 +168,7 @@

 void RefProcTaskExecutor::execute(ProcessTask& task)
 {
-  ParallelScavengeHeap* heap = PSParallelCompact::gc_heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   uint parallel_gc_threads = heap->gc_task_manager()->workers();
   uint active_gc_threads = heap->gc_task_manager()->active_workers();
   RegionTaskQueueSet* qset = ParCompactionManager::region_array();
@@ -189,7 +189,7 @@

 void RefProcTaskExecutor::execute(EnqueueTask& task)
 {
-  ParallelScavengeHeap* heap = PSParallelCompact::gc_heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   uint parallel_gc_threads = heap->gc_task_manager()->workers();
   GCTaskQueue* q = GCTaskQueue::create();
   for(uint i=0; i<parallel_gc_threads; i++) {
@@ -XXX,7 +XXX,7 @@
   _terminator(t) {}

 void StealMarkingTask::do_it(GCTaskManager* manager, uint which) {
-  assert(Universe::heap()->is_gc_active(), "called outside gc");
+  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

   NOT_PRODUCT(GCTraceTime tm("StealMarkingTask",
     PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
@@ -238,7 +238,7 @@
   _terminator(t) {}

 void StealRegionCompactionTask::do_it(GCTaskManager* manager, uint which) {
-  assert(Universe::heap()->is_gc_active(), "called outside gc");
+  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

   NOT_PRODUCT(GCTraceTime tm("StealRegionCompactionTask",
     PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
@@ -320,7 +320,7 @@
 }

 void DrainStacksCompactionTask::do_it(GCTaskManager* manager, uint which) {
-  assert(Universe::heap()->is_gc_active(), "called outside gc");
+  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

   NOT_PRODUCT(GCTraceTime tm("DrainStacksCompactionTask",
     PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp b/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp
@@ -60,8 +60,7 @@
     _region_stack(NULL),
     _region_stack_index((uint)max_uintx) {

-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   _old_gen = heap->old_gen();
   _start_array = old_gen()->start_array();
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
@@ -59,7 +59,7 @@
 CollectorCounters* PSMarkSweep::_counters = NULL;

 void PSMarkSweep::initialize() {
-  MemRegion mr = Universe::heap()->reserved_region();
+  MemRegion mr = ParallelScavengeHeap::heap()->reserved_region();
   _ref_processor = new ReferenceProcessor(mr);     // a vanilla ref proc
   _counters = new CollectorCounters("PSMarkSweep", 1);
 }
@@ -81,9 +81,9 @@
 void PSMarkSweep::invoke(bool maximum_heap_compaction) {
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
-  assert(!Universe::heap()->is_gc_active(), "not reentrant");
+  assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");

-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   GCCause::Cause gc_cause = heap->gc_cause();
   PSAdaptiveSizePolicy* policy = heap->size_policy();
   IsGCActiveMark mark;
@@ -110,8 +110,7 @@
     return false;
   }

-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   GCCause::Cause gc_cause = heap->gc_cause();

   _gc_timer->register_gc_start();
@@ -487,9 +486,7 @@
 }

 void PSMarkSweep::allocate_stacks() {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSYoungGen* young_gen = heap->young_gen();

   MutableSpace* to_space = young_gen->to_space();
@@ -515,8 +512,7 @@
   GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
   trace(" 1");

-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

   // Need to clear claim bits before the tracing starts.
   ClassLoaderDataGraph::clear_claimed_marks();
@@ -582,9 +578,7 @@
   // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
   // tracking expects us to do so. See comment under phase4.
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSOldGen* old_gen = heap->old_gen();

   // Begin compacting into the old gen
@@ -606,9 +600,7 @@
   GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
   trace("3");

-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSYoungGen* young_gen = heap->young_gen();
   PSOldGen* old_gen = heap->old_gen();
@@ -651,9 +643,7 @@

   // All pointers are now adjusted, move objects accordingly

-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSYoungGen* young_gen = heap->young_gen();
   PSOldGen* old_gen = heap->old_gen();
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp
@@ -38,15 +38,12 @@

 void PSMarkSweepDecorator::set_destination_decorator_tenured() {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   _destination_decorator = heap->old_gen()->object_mark_sweep();
 }

 void PSMarkSweepDecorator::advance_destination_decorator() {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

   assert(_destination_decorator != NULL, "Sanity");
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp
@@ -107,20 +107,22 @@
     SpaceMangler::mangle_region(cmr);
   }

-  Universe::heap()->barrier_set()->resize_covered_region(cmr);
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
+  BarrierSet* bs = heap->barrier_set();

-  CardTableModRefBS* _ct =
-    barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
+  bs->resize_covered_region(cmr);
+
+  CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);

   // Verify that the start and end of this generation is the start of a card.
   // If this wasn't true, a single card could span more than one generation,
   // which would cause problems when we commit/uncommit memory, and when we
   // clear and dirty cards.
-  guarantee(_ct->is_card_aligned(_reserved.start()), "generation must be card aligned");
-  if (_reserved.end() != Universe::heap()->reserved_region().end()) {
+  guarantee(ct->is_card_aligned(_reserved.start()), "generation must be card aligned");
+  if (_reserved.end() != heap->reserved_region().end()) {
     // Don't check at the very end of the heap as we'll assert that we're probing off
     // the end if we try.
-    guarantee(_ct->is_card_aligned(_reserved.end()), "generation must be card aligned");
+    guarantee(ct->is_card_aligned(_reserved.end()), "generation must be card aligned");
   }

   //
@@ -161,8 +163,7 @@
 }

 void PSOldGen::precompact() {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

   // Reset start array first.
   start_array()->reset();
@@ -197,7 +198,7 @@

   // Allocations in the old generation need to be reported
   if (res != NULL) {
-    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
     heap->size_policy()->tenured_allocation(word_size);
   }

@@ -376,8 +377,7 @@
   }

   if (PrintAdaptiveSizePolicy) {
-    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
     gclog_or_tty->print_cr("AdaptiveSizePolicy::old generation size: "
       "collection: %d "
       "(" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
@@ -397,7 +397,7 @@
   size_t new_word_size = new_memregion.word_size();

   start_array()->set_covered_region(new_memregion);
-  Universe::heap()->barrier_set()->resize_covered_region(new_memregion);
+  ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(new_memregion);

   // ALWAYS do this last!!
   object_space()->initialize(new_memregion,
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
@@ -748,7 +748,7 @@
 HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
   assert(addr != NULL, "Should detect NULL oop earlier");
-  assert(PSParallelCompact::gc_heap()->is_in(addr), "not in heap");
+  assert(ParallelScavengeHeap::heap()->is_in(addr), "not in heap");
   assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "not marked");

   // Region covering the object.
@@ -836,9 +836,7 @@
 }

 void PSParallelCompact::post_initialize() {
-  ParallelScavengeHeap* heap = gc_heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   MemRegion mr = heap->reserved_region();

   _ref_processor =
     new ReferenceProcessor(mr,            // span
@@ -855,8 +853,7 @@
 }

 bool PSParallelCompact::initialize() {
-  ParallelScavengeHeap* heap = gc_heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   MemRegion mr = heap->reserved_region();

   // Was the old gen get allocated successfully?
@@ -890,7 +887,7 @@
 {
   memset(&_space_info, 0, sizeof(_space_info));

-  ParallelScavengeHeap* heap = gc_heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSYoungGen* young_gen = heap->young_gen();

   _space_info[old_space_id].set_space(heap->old_gen()->object_space());
@@ -973,7 +970,7 @@
   // promotion failure does not swap spaces) because an unknown number of minor
   // collections will have swapped the spaces an unknown number of times.
GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); - ParallelScavengeHeap* heap = gc_heap(); + ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); _space_info[from_space_id].set_space(heap->young_gen()->from_space()); _space_info[to_space_id].set_space(heap->young_gen()->to_space()); @@ -1028,7 +1025,7 @@ MutableSpace* const from_space = _space_info[from_space_id].space(); MutableSpace* const to_space = _space_info[to_space_id].space(); - ParallelScavengeHeap* heap = gc_heap(); + ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); bool eden_empty = eden_space->is_empty(); if (!eden_empty) { eden_empty = absorb_live_data_from_eden(heap->size_policy(), @@ -1966,7 +1963,7 @@ assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread"); - ParallelScavengeHeap* heap = gc_heap(); + ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); GCCause::Cause gc_cause = heap->gc_cause(); assert(!heap->is_gc_active(), "not reentrant"); @@ -1994,7 +1991,7 @@ return false; } - ParallelScavengeHeap* heap = gc_heap(); + ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); _gc_timer.register_gc_start(); _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start()); @@ -2347,7 +2344,7 @@ // Recursively traverse all live objects and mark them GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); - ParallelScavengeHeap* heap = gc_heap(); + ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); uint parallel_gc_threads = heap->gc_task_manager()->workers(); uint active_gc_threads = heap->gc_task_manager()->active_workers(); TaskQueueSetSuper* qset = ParCompactionManager::region_array(); @@ -2687,8 +2684,7 @@ // trace("5"); GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); - ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); - assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); + ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); PSOldGen* old_gen = heap->old_gen(); old_gen->start_array()->reset(); uint parallel_gc_threads = heap->gc_task_manager()->workers(); @@ -2839,7 +2835,7 @@ // heap, last_space_id is returned. In debug mode it expects the address to be // in the heap and asserts such. PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) { - assert(Universe::heap()->is_in_reserved(addr), "addr not in the heap"); + assert(ParallelScavengeHeap::heap()->is_in_reserved(addr), "addr not in the heap"); for (unsigned int id = old_space_id; id < last_space_id; ++id) { if (_space_info[id].space()->contains(addr)) { diff --git a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp +++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp @@ -26,6 +26,7 @@ #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_HPP #include "gc_implementation/parallelScavenge/objectStartArray.hpp" +#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp" #include "gc_implementation/parallelScavenge/parMarkBitMap.hpp" #include "gc_implementation/parallelScavenge/psCompactionManager.hpp" #include "gc_implementation/shared/collectorCounters.hpp" @@ -1168,11 +1169,6 @@ PSParallelCompact(); - // Convenient accessor for Universe::heap(). 
-  static ParallelScavengeHeap* gc_heap() {
-    return (ParallelScavengeHeap*)Universe::heap();
-  }
-
   static void invoke(bool maximum_heap_compaction);
   static bool invoke_no_policy(bool maximum_heap_compaction);
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.inline.hpp b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.inline.hpp
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.inline.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.inline.hpp
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_INLINE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_INLINE_HPP

+#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
 #include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
 #include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
 #include "gc_interface/collectedHeap.hpp"
@@ -36,7 +37,7 @@
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop)) {
     oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-    assert(Universe::heap()->is_in(obj), "should be in heap");
+    assert(ParallelScavengeHeap::heap()->is_in(obj), "should be in heap");

     if (mark_bitmap()->is_unmarked(obj) && mark_obj(obj)) {
       cm->push(obj);
@@ -62,14 +63,14 @@
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop)) {
     oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-    assert(Universe::heap()->is_in(obj), "should be in heap");
+    assert(ParallelScavengeHeap::heap()->is_in(obj), "should be in heap");

     oop new_obj = (oop)summary_data().calc_new_pointer(obj);
     assert(new_obj != NULL,    // is forwarding ptr?
            "should be forwarded");
     // Just always do the update unconditionally?
     if (new_obj != NULL) {
-      assert(Universe::heap()->is_in_reserved(new_obj),
+      assert(ParallelScavengeHeap::heap()->is_in_reserved(new_obj),
              "should be in object space");
       oopDesc::encode_store_heap_oop_not_null(p, new_obj);
     }
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp b/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp
@@ -103,7 +103,7 @@
 }

 bool PSPromotionLAB::unallocate_object(HeapWord* obj, size_t obj_size) {
-  assert(Universe::heap()->is_in(obj), "Object outside heap");
+  assert(ParallelScavengeHeap::heap()->is_in(obj), "Object outside heap");

   if (contains(obj)) {
     HeapWord* object_end = obj + obj_size;
@@ -137,9 +137,7 @@

 #ifdef ASSERT
 bool PSYoungPromotionLAB::lab_is_valid(MemRegion lab) {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   MutableSpace* to_space = heap->young_gen()->to_space();
   MemRegion used = to_space->used_region();
   if (used.contains(lab)) {
@@ -150,10 +148,9 @@
 }

 bool PSOldPromotionLAB::lab_is_valid(MemRegion lab) {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
   assert(_start_array->covered_region().contains(lab), "Sanity");

+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSOldGen* old_gen = heap->old_gen();
   MemRegion used = old_gen->object_space()->used_region();

diff --git a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp
@@ -44,8 +44,7 @@
 MutableSpace* PSPromotionManager::_young_space = NULL;

 void PSPromotionManager::initialize() {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

   _old_gen = heap->old_gen();
   _young_space = heap->young_gen()->to_space();
@@ -88,8 +87,7 @@
 }

 void PSPromotionManager::pre_scavenge() {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

   _young_space = heap->young_gen()->to_space();

@@ -132,7 +130,7 @@
 void
 PSPromotionManager::print_taskqueue_stats(outputStream* const out) {
   out->print_cr("== GC Tasks Stats, GC %3d",
-                Universe::heap()->total_collections());
+                ParallelScavengeHeap::heap()->total_collections());

   TaskQueueStats totals;
   out->print("thr "); TaskQueueStats::print_header(1, out); out->cr();
@@ -160,8 +158,7 @@
 #endif // TASKQUEUE_STATS

 PSPromotionManager::PSPromotionManager() {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

   // We set the old lab's start array.
   _old_lab.set_start_array(old_gen()->start_array());
@@ -191,8 +188,7 @@
   // We need to get an assert in here to make sure the labs are always flushed.
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

   // Do not prefill the LAB's, save heap wastage!
   HeapWord* lab_base = young_space()->top();
@@ -213,8 +209,7 @@
   totally_drain = totally_drain || _totally_drain;

 #ifdef ASSERT
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   MutableSpace* to_space = heap->young_gen()->to_space();
   MutableSpace* old_space = heap->old_gen()->object_space();
 #endif /* ASSERT */
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP

+#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
 #include "gc_implementation/parallelScavenge/psOldGen.hpp"
 #include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
 #include "gc_implementation/parallelScavenge/psPromotionLAB.inline.hpp"
@@ -57,9 +58,7 @@
 template <class T>
 inline void PSPromotionManager::claim_or_forward_depth(T* p) {
   assert(should_scavenge(p, true), "revisiting object?");
-  assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap,
-         "Sanity");
-  assert(Universe::heap()->is_in(p), "pointer outside heap");
+  assert(ParallelScavengeHeap::heap()->is_in(p), "pointer outside heap");

   claim_or_forward_internal_depth(p);
 }
@@ -150,7 +149,7 @@
   // Otherwise try allocating obj tenured
   if (new_obj == NULL) {
 #ifndef PRODUCT
-    if (Universe::heap()->promotion_should_fail()) {
+    if (ParallelScavengeHeap::heap()->promotion_should_fail()) {
       return oop_promotion_failed(o, test_mark);
     }
 #endif  // #ifndef PRODUCT
@@ -296,7 +295,7 @@
   // that are outside the heap. These pointers are either from roots
   // or from metadata.
   if ((!PSScavenge::is_obj_in_young((HeapWord*)p)) &&
-      Universe::heap()->is_in_reserved(p)) {
+      ParallelScavengeHeap::heap()->is_in_reserved(p)) {
     if (PSScavenge::is_obj_in_young(new_obj)) {
       PSScavenge::card_table()->inline_write_ref_field_gc(p, new_obj);
     }
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
@@ -87,8 +87,7 @@
 public:
   PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
-    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
     _to_space = heap->young_gen()->to_space();

     assert(_promotion_manager != NULL, "Sanity");
@@ -218,11 +217,9 @@
 bool PSScavenge::invoke() {
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
-  assert(!Universe::heap()->is_gc_active(), "not reentrant");
+  assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");

-  ParallelScavengeHeap* const heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+  ParallelScavengeHeap* const heap = ParallelScavengeHeap::heap();
   PSAdaptiveSizePolicy* policy = heap->size_policy();
   IsGCActiveMark mark;

@@ -273,9 +270,8 @@
     return false;
   }

-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   GCCause::Cause gc_cause = heap->gc_cause();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

   // Check for potential problems.
   if (!should_attempt_scavenge()) {
@@ -713,9 +709,7 @@
 // unforwarding markOops. It then restores any preserved mark oops,
 // and clears the _preserved_mark_stack.
 void PSScavenge::clean_up_failed_promotion() {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSYoungGen* young_gen = heap->young_gen();

   {
@@ -742,7 +736,7 @@
   }

   // Reset the PromotionFailureALot counters.
-  NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
+  NOT_PRODUCT(heap->reset_promotion_should_fail();)
 }

 // This method is called whenever an attempt to promote an object
@@ -761,8 +755,7 @@
 }

 bool PSScavenge::should_attempt_scavenge() {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();

   if (UsePerfData) {
@@ -838,9 +831,7 @@
       MaxTenuringThreshold;
   }

-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSYoungGen* young_gen = heap->young_gen();
   PSOldGen* old_gen = heap->old_gen();
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp
@@ -33,7 +33,7 @@
 #include "utilities/globalDefinitions.hpp"

 inline void PSScavenge::save_to_space_top_before_gc() {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   _to_space_top_before_gc = heap->young_gen()->to_space()->top();
 }

@@ -56,7 +56,7 @@
 template <class T>
 inline bool PSScavenge::should_scavenge(T* p, bool check_to_space) {
   if (check_to_space) {
-    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
     return should_scavenge(p, heap->young_gen()->to_space());
   }
   return should_scavenge(p);
@@ -97,7 +97,6 @@
       ParallelScavengeHeap* psh = ParallelScavengeHeap::heap();
       assert(!psh->is_in_reserved(p), "GC barrier needed");
       if (PSScavenge::should_scavenge(p)) {
-        assert(!Universe::heap()->is_in_reserved(p), "Not from meta-data?");
         assert(PSScavenge::should_scavenge(p, true), "revisiting object?");

         oop o = *p;
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp b/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp
@@ -47,7 +47,7 @@
 //
 void ScavengeRootsTask::do_it(GCTaskManager* manager, uint which) {
-  assert(Universe::heap()->is_gc_active(), "called outside gc");
+  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

   PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
   PSScavengeRootsClosure roots_closure(pm);
@@ -118,7 +118,7 @@
 //
 void ThreadRootsTask::do_it(GCTaskManager* manager, uint which) {
-  assert(Universe::heap()->is_gc_active(), "called outside gc");
+  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

   PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
   PSScavengeRootsClosure roots_closure(pm);
@@ -143,7 +143,7 @@
   _terminator(t) {}

 void StealTask::do_it(GCTaskManager* manager, uint which) {
-  assert(Universe::heap()->is_gc_active(), "called outside gc");
+  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

   PSPromotionManager* pm =
     PSPromotionManager::gc_thread_promotion_manager(which);
@@ -181,10 +181,8 @@
 {
   PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
-
-  assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
   CardTableExtension* card_table =
-    barrier_set_cast<CardTableExtension>(Universe::heap()->barrier_set());
+    barrier_set_cast<CardTableExtension>(ParallelScavengeHeap::heap()->barrier_set());

   card_table->scavenge_contents_parallel(_gen->start_array(),
                                          _gen->object_space(),
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp b/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp
@@ -62,7 +62,7 @@
   MemRegion cmr((HeapWord*)virtual_space()->low(),
                 (HeapWord*)virtual_space()->high());
-  Universe::heap()->barrier_set()->resize_covered_region(cmr);
+  ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(cmr);

   if (ZapUnusedHeapArea) {
     // Mangle newly committed space immediately because it
@@ -103,7 +103,7 @@
                         _max_gen_size, _virtual_space);

   // Compute maximum space sizes for performance counters
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   size_t alignment = heap->space_alignment();
   size_t size = virtual_space()->reserved_size();

@@ -153,8 +153,7 @@
 }

 void PSYoungGen::compute_initial_space_boundaries() {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

   // Compute sizes
   size_t alignment = heap->space_alignment();
@@ -208,7 +207,7 @@

 #ifndef PRODUCT
 void PSYoungGen::space_invariants() {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   const size_t alignment = heap->space_alignment();

   // Currently, our eden size cannot shrink to zero
@@ -494,7 +493,7 @@
   char* to_start = (char*)to_space()->bottom();
   char* to_end = (char*)to_space()->end();

-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   const size_t alignment = heap->space_alignment();
   const bool maintain_minimum =
     (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();
@@ -546,8 +545,6 @@
     // Does the optimal to-space overlap from-space?
     if (to_start < (char*)from_space()->end()) {
-      assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
       // Calculate the minimum offset possible for from_end
       size_t from_size =
         pointer_delta(from_space()->top(), from_start, sizeof(char));
@@ -708,9 +705,7 @@
   assert(from_space()->top() == old_from_top, "from top changed!");

   if (PrintAdaptiveSizePolicy) {
-    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
     gclog_or_tty->print("AdaptiveSizePolicy::survivor space sizes: "
                   "collection: %d "
                   "(" SIZE_FORMAT ", " SIZE_FORMAT ") -> "
@@ -843,7 +838,7 @@
 // from-space.
 size_t PSYoungGen::available_to_live() {
   size_t delta_in_survivor = 0;
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   const size_t space_alignment = heap->space_alignment();
   const size_t gen_alignment = heap->generation_alignment();

@@ -927,7 +922,7 @@
   MemRegion cmr((HeapWord*)virtual_space()->low(),
                 (HeapWord*)virtual_space()->high());
-  Universe::heap()->barrier_set()->resize_covered_region(cmr);
+  ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(cmr);

   space_invariants();
 }
diff --git a/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp b/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp
@@ -41,8 +41,7 @@
 void VM_ParallelGCFailedAllocation::doit() {
   SvcGCMarker sgcm(SvcGCMarker::MINOR);

-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "must be a ParallelScavengeHeap");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

   GCCauseSetter gccs(heap, _gc_cause);
   _result = heap->failed_mem_allocate(_word_size);
@@ -63,9 +62,7 @@
 void VM_ParallelGCSystemGC::doit() {
   SvcGCMarker sgcm(SvcGCMarker::FULL);

-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap,
-    "must be a ParallelScavengeHeap");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

   GCCauseSetter gccs(heap, _gc_cause);
   if (_gc_cause == GCCause::_gc_locker || _gc_cause == GCCause::_wb_young_gc
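
For reference, every rewritten call site above funnels through the existing static ParallelScavengeHeap::heap() accessor declared in parallelScavengeHeap.hpp. Its definition is not part of this diff, so the following is only a rough sketch from memory of its shape, assuming it is backed by the static _psh field seen in initialize(); the exact assert messages may differ:

  // Sketch only, not part of this patch: the typed accessor that replaces
  // the (ParallelScavengeHeap*)Universe::heap() casts removed above.
  static ParallelScavengeHeap* heap() {
    assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
    assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap");
    return _psh;
  }

This is also why the parallelScavengeHeap.cpp hunk moves the _psh = this; assignment to the top of initialize(), before the AdjoiningGenerations are created: PSOldGen/PSYoungGen initialization now reaches the card table through ParallelScavengeHeap::heap()->barrier_set(), so _psh has to be set by then.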