
src/hotspot/share/gc/g1/g1CollectedHeap.cpp

rev 48000 : [mq]: open.patch

*** 42,51 ****
--- 42,52 ----
  #include "gc/g1/g1GCPhaseTimes.hpp"
  #include "gc/g1/g1HeapSizingPolicy.hpp"
  #include "gc/g1/g1HeapTransition.hpp"
  #include "gc/g1/g1HeapVerifier.hpp"
  #include "gc/g1/g1HotCardCache.hpp"
+ #include "gc/g1/g1MemoryPool.hpp"
  #include "gc/g1/g1OopClosures.inline.hpp"
  #include "gc/g1/g1ParScanThreadState.inline.hpp"
  #include "gc/g1/g1Policy.hpp"
  #include "gc/g1/g1RegionToSpaceMapper.hpp"
  #include "gc/g1/g1RemSet.hpp"
*** 1227,1237 ****
    }
  
    const bool do_clear_all_soft_refs = clear_all_soft_refs ||
        collector_policy()->should_clear_all_soft_refs();
  
!   G1FullCollector collector(this, explicit_gc, do_clear_all_soft_refs);
    GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
  
    collector.prepare_collection();
    collector.collect();
    collector.complete_collection();
--- 1228,1238 ----
    }
  
    const bool do_clear_all_soft_refs = clear_all_soft_refs ||
        collector_policy()->should_clear_all_soft_refs();
  
!   G1FullCollector collector(this, &_full_gc_memory_manager, explicit_gc, do_clear_all_soft_refs);
    GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
  
    collector.prepare_collection();
    collector.collect();
    collector.complete_collection();
*** 1524,1533 ****
--- 1525,1539 ----
  G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
    CollectedHeap(),
    _young_gen_sampling_thread(NULL),
    _collector_policy(collector_policy),
+   _memory_manager("G1 Young Generation", "end of minor GC"),
+   _full_gc_memory_manager("G1 Old Generation", "end of major GC"),
+   _eden_pool(NULL),
+   _survivor_pool(NULL),
+   _old_pool(NULL),
    _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
    _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
    _g1_policy(create_g1_policy(_gc_timer_stw)),
    _collection_set(this, _g1_policy),
    _dirty_card_queue_set(false),
*** 1828,1837 ****
--- 1834,1857 ----
    _collection_set.initialize(max_regions());
  
    return JNI_OK;
  }
  
+ void G1CollectedHeap::initialize_serviceability() {
+   _eden_pool = new G1EdenPool(this);
+   _survivor_pool = new G1SurvivorPool(this);
+   _old_pool = new G1OldGenPool(this);
+ 
+   _full_gc_memory_manager.add_pool(_eden_pool);
+   _full_gc_memory_manager.add_pool(_survivor_pool);
+   _full_gc_memory_manager.add_pool(_old_pool);
+ 
+   _memory_manager.add_pool(_eden_pool);
+   _memory_manager.add_pool(_survivor_pool);
+ 
+ }
+ 
  void G1CollectedHeap::stop() {
    // Stop all concurrent threads. We do this to make sure these threads
    // do not continue to execute and access resources (e.g. logging)
    // that are destroyed during shutdown.
    _cr->stop();
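As a side note on the wiring above: the full-GC manager is registered against all three G1 pools, while the young-GC manager is registered against eden and survivor only. The following stand-alone sketch illustrates that pool/manager association with hypothetical Pool and Manager types (std::vector standing in for the manager's internal pool list); it is not the HotSpot MemoryPool/GCMemoryManager code.

#include <initializer_list>
#include <iostream>
#include <string>
#include <vector>

// Hypothetical stand-ins for the serviceability objects created above.
struct Pool {
  std::string name;
};

struct Manager {
  std::string name;
  std::vector<const Pool*> pools;   // pools this manager reports on
  void add_pool(const Pool* p) { pools.push_back(p); }
};

int main() {
  Pool eden{"eden"}, survivor{"survivor"}, old_gen{"old gen"};

  Manager young{"G1 Young Generation"};
  Manager full{"G1 Old Generation"};

  // Mirrors initialize_serviceability(): the full-GC manager covers all
  // three pools, the young-GC manager only eden and survivor.
  full.add_pool(&eden);
  full.add_pool(&survivor);
  full.add_pool(&old_gen);
  young.add_pool(&eden);
  young.add_pool(&survivor);

  for (const Manager* m : {&young, &full}) {
    std::cout << m->name << " manages " << m->pools.size() << " pools\n";
  }
}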
*** 1853,1862 ****
--- 1873,1883 ----
  size_t G1CollectedHeap::conservative_max_heap_alignment() {
    return HeapRegion::max_region_size();
  }
  
  void G1CollectedHeap::post_initialize() {
+   CollectedHeap::post_initialize();
    ref_processing_init();
  }
  
  void G1CollectedHeap::ref_processing_init() {
    // Reference processing in G1 currently works as follows:
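The added call chains to the base class before the G1-specific setup, presumably so that CollectedHeap-level post-initialization (including the serviceability setup introduced by this patch) runs first. A minimal sketch of that chain-to-base idiom, using hypothetical BaseHeap/G1LikeHeap classes rather than the real CollectedHeap hierarchy:

#include <iostream>

// Hypothetical classes illustrating the chain-to-base idiom: the subclass
// override must invoke the base implementation explicitly, otherwise the
// base-class setup would be skipped.
class BaseHeap {
 public:
  virtual ~BaseHeap() = default;
  virtual void post_initialize() {
    initialize_serviceability();          // base-class setup hook
  }
 protected:
  virtual void initialize_serviceability() {
    std::cout << "base serviceability setup\n";
  }
};

class G1LikeHeap : public BaseHeap {
 public:
  void post_initialize() override {
    BaseHeap::post_initialize();          // chain to the base first
    std::cout << "G1-specific post-init (reference processing)\n";
  }
 protected:
  void initialize_serviceability() override {
    std::cout << "create pools, register them with managers\n";
  }
};

int main() {
  G1LikeHeap heap;
  heap.post_initialize();
}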
*** 2952,2962 ****
                                Threads::number_of_non_daemon_threads());
    workers()->update_active_workers(active_workers);
    log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
  
    TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
!   TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
  
    // If the secondary_free_list is not empty, append it to the
    // free_list. No need to wait for the cleanup operation to finish;
    // the region allocation code will check the secondary_free_list
    // and wait if necessary. If the G1StressConcRegionFreeing flag is
--- 2973,2983 ----
                                Threads::number_of_non_daemon_threads());
    workers()->update_active_workers(active_workers);
    log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
  
    TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
!   TraceMemoryManagerStats tms(&_memory_manager, gc_cause());
  
    // If the secondary_free_list is not empty, append it to the
    // free_list. No need to wait for the cleanup operation to finish;
    // the region allocation code will check the secondary_free_list
    // and wait if necessary. If the G1StressConcRegionFreeing flag is
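The change above passes the affected GCMemoryManager to the stats object instead of a fullGC flag, so the evacuation pause is attributed to the young-collection manager directly. A self-contained sketch of the underlying RAII idiom, using hypothetical Manager/ScopedGCStats types (not the actual TraceMemoryManagerStats implementation):

#include <chrono>
#include <cstdio>

// Hypothetical stand-in for a GC memory manager that accumulates pause stats.
struct Manager {
  const char* name;
  unsigned collections = 0;
  std::chrono::microseconds total_pause{0};
};

// RAII guard: notes GC start on construction and GC end on destruction,
// mirroring how a stats object scoped to the pause brackets the collection.
class ScopedGCStats {
  Manager& _mgr;
  std::chrono::steady_clock::time_point _start;
 public:
  explicit ScopedGCStats(Manager& mgr)
      : _mgr(mgr), _start(std::chrono::steady_clock::now()) {}
  ~ScopedGCStats() {
    auto end = std::chrono::steady_clock::now();
    _mgr.collections++;
    _mgr.total_pause +=
        std::chrono::duration_cast<std::chrono::microseconds>(end - _start);
  }
};

int main() {
  Manager young{"G1 Young Generation"};
  {
    ScopedGCStats stats(young);   // pause begins
    // ... evacuation work would happen here ...
  }                               // pause ends, stats recorded for 'young'
  std::printf("%s: %u collections\n", young.name, young.collections);
}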
*** 5366,5370 ****
--- 5387,5406 ----
  void G1CollectedHeap::rebuild_strong_code_roots() {
    RebuildStrongCodeRootClosure blob_cl(this);
    CodeCache::blobs_do(&blob_cl);
  }
+ 
+ GrowableArray<GCMemoryManager*> G1CollectedHeap::memory_managers() {
+   GrowableArray<GCMemoryManager*> memory_managers(2);
+   memory_managers.append(&_memory_manager);
+   memory_managers.append(&_full_gc_memory_manager);
+   return memory_managers;
+ }
+ 
+ GrowableArray<MemoryPool*> G1CollectedHeap::memory_pools() {
+   GrowableArray<MemoryPool*> memory_pools(3);
+   memory_pools.append(_eden_pool);
+   memory_pools.append(_survivor_pool);
+   memory_pools.append(_old_pool);
+   return memory_pools;
+ }
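These accessors expose the two managers and three pools to the serviceability layer. A hedged sketch of how such accessors might be consumed, with std::vector standing in for HotSpot's GrowableArray and all names purely illustrative:

#include <iostream>
#include <string>
#include <vector>

// Illustrative stand-ins only; not the real HotSpot types.
struct ManagerStub { std::string name; };
struct PoolStub    { std::string name; };

struct HeapStub {
  ManagerStub young{"G1 Young Generation"};
  ManagerStub full{"G1 Old Generation"};
  PoolStub eden{"eden"}, survivor{"survivor"}, old_gen{"old gen"};

  // Same shape as the accessors added above: build a small list by value
  // and hand it to the caller.
  std::vector<ManagerStub*> memory_managers() { return { &young, &full }; }
  std::vector<PoolStub*> memory_pools() { return { &eden, &survivor, &old_gen }; }
};

int main() {
  HeapStub heap;
  // A serviceability layer would iterate these lists to publish MXBeans.
  for (ManagerStub* m : heap.memory_managers()) {
    std::cout << "manager: " << m->name << '\n';
  }
  for (PoolStub* p : heap.memory_pools()) {
    std::cout << "pool: " << p->name << '\n';
  }
}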