# HG changeset patch # User pliden # Date 1428675713 -7200 # Fri Apr 10 16:21:53 2015 +0200 # Node ID 6a71c02ef0a80a7e6861943affbfa4ef1fe780f5 # Parent de9cb3e4eb23266a1e2cb9c41127028ebd3015e9 imported patch heap_statics_remove diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @@ -409,10 +409,6 @@ return !hr->is_humongous(); } -// Private class members. - -G1CollectedHeap* G1CollectedHeap::_g1h; - // Private methods. HeapRegion* @@ -1768,14 +1764,12 @@ _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()), _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) { - _g1h = this; - _workers = new FlexibleWorkGang("GC Thread", ParallelGCThreads, /* are_GC_task_threads */true, /* are_ConcurrentGC_threads */false); _workers->initialize_workers(); - _allocator = G1Allocator::create_allocator(_g1h); + _allocator = G1Allocator::create_allocator(this); _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2; int n_queues = MAX2((int)ParallelGCThreads, 1); @@ -1937,8 +1931,6 @@ _bot_shared = new G1BlockOffsetSharedArray(reserved_region(), bot_storage); - _g1h = this; - _in_cset_fast_test.initialize(_hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes); _humongous_is_live.initialize(_hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes); @@ -3317,9 +3309,10 @@ #endif // PRODUCT G1CollectedHeap* G1CollectedHeap::heap() { - assert(_g1h != NULL, "Uninitialized access to G1CollectedHeap::heap()"); - assert(_g1h->kind() == CollectedHeap::G1CollectedHeap, "Not a G1 heap"); - return _g1h; + G1CollectedHeap* heap = (G1CollectedHeap*)Universe::heap(); + assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()"); + assert(heap->kind() == CollectedHeap::G1CollectedHeap, "Not a G1CollectedHeap"); + return heap; } void 
G1CollectedHeap::gc_prologue(bool full /* Ignored */) { @@ -4837,7 +4830,7 @@ void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) { { - uint n_workers = _g1h->workers()->active_workers(); + uint n_workers = workers()->active_workers(); G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols); set_par_threads(n_workers); workers()->run_task(&g1_unlink_task); @@ -4869,7 +4862,7 @@ void G1CollectedHeap::redirty_logged_cards() { double redirty_logged_cards_start = os::elapsedTime(); - uint n_workers = _g1h->workers()->active_workers(); + uint n_workers = workers()->active_workers(); G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set()); dirty_card_queue_set().reset_for_par_iteration(); @@ -5302,7 +5295,7 @@ OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl; - if (_g1h->g1_policy()->during_initial_mark_pause()) { + if (g1_policy()->during_initial_mark_pause()) { // We also need to mark copied objects. copy_non_heap_cl = &copy_mark_non_heap_cl; } @@ -6056,12 +6049,12 @@ HeapRegionSetCount empty_set; remove_from_old_sets(empty_set, cl.humongous_free_count()); - G1HRPrinter* hr_printer = _g1h->hr_printer(); - if (hr_printer->is_active()) { + G1HRPrinter* hrp = hr_printer(); + if (hrp->is_active()) { FreeRegionListIterator iter(&local_cleanup_list); while (iter.more_available()) { HeapRegion* hr = iter.get_next(); - hr_printer->cleanup(hr); + hrp->cleanup(hr); } } diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp @@ -202,9 +202,6 @@ friend class G1CheckCSetFastTableClosure; private: - // The one and only G1CollectedHeap, so static functions can find it. 
- static G1CollectedHeap* _g1h; - FlexibleWorkGang* _workers; static size_t _humongous_object_threshold_in_words; diff --git a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp @@ -49,7 +49,6 @@ PSOldGen* ParallelScavengeHeap::_old_gen = NULL; PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL; PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL; -ParallelScavengeHeap* ParallelScavengeHeap::_psh = NULL; GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL; jint ParallelScavengeHeap::initialize() { @@ -89,7 +88,6 @@ double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0; double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0; - _psh = this; _gens = new AdjoiningGenerations(heap_rs, _collector_policy, generation_alignment()); _old_gen = _gens->old_gen(); @@ -650,9 +648,10 @@ } ParallelScavengeHeap* ParallelScavengeHeap::heap() { - assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()"); - assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap"); - return _psh; + ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); + assert(heap != NULL, "Uninitialized access to ParallelScavengeHeap::heap()"); + assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Not a ParallelScavengeHeap"); + return heap; } // Before delegating the resize to the young generation, diff --git a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp @@ 
-53,8 +53,6 @@ static PSAdaptiveSizePolicy* _size_policy; static PSGCAdaptivePolicyCounters* _gc_policy_counters; - static ParallelScavengeHeap* _psh; - GenerationSizer* _collector_policy; // Collection of generations that are adjacent in the diff --git a/src/share/vm/gc_implementation/parallelScavenge/vmStructs_parallelgc.hpp b/src/share/vm/gc_implementation/parallelScavenge/vmStructs_parallelgc.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/vmStructs_parallelgc.hpp +++ b/src/share/vm/gc_implementation/parallelScavenge/vmStructs_parallelgc.hpp @@ -61,7 +61,6 @@ \ static_field(ParallelScavengeHeap, _young_gen, PSYoungGen*) \ static_field(ParallelScavengeHeap, _old_gen, PSOldGen*) \ - static_field(ParallelScavengeHeap, _psh, ParallelScavengeHeap*) \ \ #define VM_TYPES_PARALLELGC(declare_type, \ diff --git a/src/share/vm/memory/genCollectedHeap.cpp b/src/share/vm/memory/genCollectedHeap.cpp --- a/src/share/vm/memory/genCollectedHeap.cpp +++ b/src/share/vm/memory/genCollectedHeap.cpp @@ -58,7 +58,6 @@ #include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp" #endif // INCLUDE_ALL_GCS -GenCollectedHeap* GenCollectedHeap::_gch; NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;) // The set of potentially parallel tasks in root scanning. 
@@ -126,8 +125,6 @@ _rem_set = collector_policy()->create_rem_set(reserved_region()); set_barrier_set(rem_set()->bs()); - _gch = this; - ReservedSpace young_rs = heap_rs.first_part(gen_policy()->young_gen_spec()->max_size(), false, false); _young_gen = gen_policy()->young_gen_spec()->init(young_rs, 0, rem_set()); heap_rs = heap_rs.last_part(gen_policy()->young_gen_spec()->max_size()); @@ -1113,9 +1110,10 @@ } GenCollectedHeap* GenCollectedHeap::heap() { - assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()"); - assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap"); - return _gch; + GenCollectedHeap* heap = (GenCollectedHeap*)Universe::heap(); + assert(heap != NULL, "Uninitialized access to GenCollectedHeap::heap()"); + assert(heap->kind() == CollectedHeap::GenCollectedHeap, "Not a GenCollectedHeap"); + return heap; } void GenCollectedHeap::prepare_for_compaction() { diff --git a/src/share/vm/memory/genCollectedHeap.hpp b/src/share/vm/memory/genCollectedHeap.hpp --- a/src/share/vm/memory/genCollectedHeap.hpp +++ b/src/share/vm/memory/genCollectedHeap.hpp @@ -54,11 +54,7 @@ public: friend class VM_PopulateDumpSharedSpace; - protected: - // Fields: - static GenCollectedHeap* _gch; - - private: +private: Generation* _young_gen; Generation* _old_gen; diff --git a/src/share/vm/runtime/vmStructs.cpp b/src/share/vm/runtime/vmStructs.cpp --- a/src/share/vm/runtime/vmStructs.cpp +++ b/src/share/vm/runtime/vmStructs.cpp @@ -555,7 +555,6 @@ nonstatic_field(GenerationSpec, _init_size, size_t) \ nonstatic_field(GenerationSpec, _max_size, size_t) \ \ - static_field(GenCollectedHeap, _gch, GenCollectedHeap*) \ nonstatic_field(GenCollectedHeap, _young_gen, Generation*) \ nonstatic_field(GenCollectedHeap, _old_gen, Generation*) \ \