
src/share/vm/gc/shared/genCollectedHeap.cpp

rev 12854 : [mq]: gcinterface.patch

*** 54,67 ****
  #include "services/management.hpp"
  #include "services/memoryService.hpp"
  #include "utilities/macros.hpp"
  #include "utilities/stack.inline.hpp"
  #include "utilities/vmError.hpp"
- #if INCLUDE_ALL_GCS
- #include "gc/cms/concurrentMarkSweepThread.hpp"
- #include "gc/cms/vmCMSOperations.hpp"
- #endif // INCLUDE_ALL_GCS
  
  NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)
  
  // The set of potentially parallel tasks in root scanning.
  enum GCH_strong_roots_tasks {
--- 54,63 ----
*** 86,104 ****
    _gen_policy(policy),
    _process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
    _full_collections_completed(0)
  {
    assert(policy != NULL, "Sanity check");
!   if (UseConcMarkSweepGC) {
!     _workers = new WorkGang("GC Thread", ParallelGCThreads,
!                             /* are_GC_task_threads */true,
!                             /* are_ConcurrentGC_threads */false);
!     _workers->initialize_workers();
!   } else {
      // Serial GC does not use workers.
      _workers = NULL;
-   }
  }
  
  jint GenCollectedHeap::initialize() {
    CollectedHeap::pre_initialize();
--- 82,94 ----
    _gen_policy(policy),
    _process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
    _full_collections_completed(0)
  {
    assert(policy != NULL, "Sanity check");
!   // Serial GC does not use workers.
    _workers = NULL;
  }
  
  jint GenCollectedHeap::initialize() {
    CollectedHeap::pre_initialize();
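Note on the constructor hunk above: with the CMS-specific branch gone, the base GenCollectedHeap always starts with _workers == NULL, and a collector that needs a work gang is presumably expected to create one in its own subclass (that subclass is not part of this file). The following standalone C++ sketch illustrates that shape; WorkGang, GenHeapBase and ConcurrentHeap here are simplified stand-ins, not the HotSpot classes.

  // Minimal sketch: the base heap leaves _workers unset; a subclass that
  // needs parallel workers allocates them in its own constructor.
  // All names below are illustrative stand-ins, not HotSpot types.
  #include <cstddef>
  #include <string>
  #include <vector>

  class WorkGang {
  public:
    WorkGang(const std::string& name, size_t n) : _name(name), _threads(n) {}
    size_t total_workers() const { return _threads.size(); }
  private:
    std::string _name;
    std::vector<int> _threads;   // placeholder for real worker threads
  };

  class GenHeapBase {
  protected:
    WorkGang* _workers;
  public:
    GenHeapBase() : _workers(nullptr) {}   // serial case: no workers
    virtual ~GenHeapBase() { delete _workers; }
    WorkGang* workers() const { return _workers; }
  };

  class ConcurrentHeap : public GenHeapBase {
  public:
    explicit ConcurrentHeap(size_t parallel_gc_threads) {
      // The concurrent collector's subclass owns the decision to create workers.
      _workers = new WorkGang("GC Thread", parallel_gc_threads);
    }
  };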
*** 124,156 ****
      return JNI_ENOMEM;
    }
  
    initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
  
!   _rem_set = collector_policy()->create_rem_set(reserved_region());
!   set_barrier_set(rem_set()->bs());
  
    ReservedSpace young_rs = heap_rs.first_part(gen_policy()->young_gen_spec()->max_size(), false, false);
    _young_gen = gen_policy()->young_gen_spec()->init(young_rs, rem_set());
    heap_rs = heap_rs.last_part(gen_policy()->young_gen_spec()->max_size());
  
    ReservedSpace old_rs = heap_rs.first_part(gen_policy()->old_gen_spec()->max_size(), false, false);
    _old_gen = gen_policy()->old_gen_spec()->init(old_rs, rem_set());
    clear_incremental_collection_failed();
  
- #if INCLUDE_ALL_GCS
-   // If we are running CMS, create the collector responsible
-   // for collecting the CMS generations.
-   if (collector_policy()->is_concurrent_mark_sweep_policy()) {
-     bool success = create_cms_collector();
-     if (!success) return JNI_ENOMEM;
-   }
- #endif // INCLUDE_ALL_GCS
- 
    return JNI_OK;
  }
  
  char* GenCollectedHeap::allocate(size_t alignment, ReservedSpace* heap_rs){
    // Now figure out the total size.
    const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
    assert(alignment % pageSize == 0, "Must be");
--- 114,142 ----
      return JNI_ENOMEM;
    }
  
    initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
  
!   CardTableModRefBSForCTRS* barrier_set = create_barrier_set(reserved_region());
!   _rem_set = collector_policy()->create_rem_set(reserved_region(), barrier_set);
!   set_barrier_set(barrier_set);
  
    ReservedSpace young_rs = heap_rs.first_part(gen_policy()->young_gen_spec()->max_size(), false, false);
    _young_gen = gen_policy()->young_gen_spec()->init(young_rs, rem_set());
    heap_rs = heap_rs.last_part(gen_policy()->young_gen_spec()->max_size());
  
    ReservedSpace old_rs = heap_rs.first_part(gen_policy()->old_gen_spec()->max_size(), false, false);
    _old_gen = gen_policy()->old_gen_spec()->init(old_rs, rem_set());
    clear_incremental_collection_failed();
  
    return JNI_OK;
  }
  
+ CardTableModRefBSForCTRS* GenCollectedHeap::create_barrier_set(MemRegion whole_heap) {
+   return new CardTableModRefBSForCTRS(whole_heap);
+ }
+ 
  char* GenCollectedHeap::allocate(size_t alignment, ReservedSpace* heap_rs){
    // Now figure out the total size.
    const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
    assert(alignment % pageSize == 0, "Must be");
--- 114,142 ----
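The new create_barrier_set() factory in the hunk above is the interesting pattern: initialize() now obtains the barrier set from a virtual function, so a subclass can substitute a specialized card table without touching the initialization sequence. A minimal, self-contained sketch of that factory shape follows; the class names are placeholders for illustration, not the HotSpot ones.

  // Sketch of a virtual barrier-set factory: the base class builds a plain
  // card-table barrier set, a subclass overrides the factory to return a
  // specialized one. Names are illustrative only.
  #include <cstddef>
  #include <cstdio>

  struct MemRegion { char* start; size_t size; };

  class CardTableBS {
  public:
    virtual ~CardTableBS() {}
    virtual const char* name() const { return "CardTableBS"; }
  };

  class ScanningCardTableBS : public CardTableBS {
  public:
    const char* name() const override { return "ScanningCardTableBS"; }
  };

  class GenHeap {
  public:
    virtual ~GenHeap() { delete _barrier_set; }
    // Mirrors the initialize() flow above: ask the (virtual) factory,
    // then wire the result into the rest of the heap.
    void initialize(MemRegion reserved) {
      _barrier_set = create_barrier_set(reserved);
      std::printf("using %s\n", _barrier_set->name());
    }
  protected:
    virtual CardTableBS* create_barrier_set(MemRegion) {
      return new CardTableBS();          // default, serial-style barrier set
    }
  private:
    CardTableBS* _barrier_set = nullptr;
  };

  class ConcurrentGenHeap : public GenHeap {
  protected:
    CardTableBS* create_barrier_set(MemRegion) override {
      return new ScanningCardTableBS();  // collector-specific barrier set
    }
  };

Because the factory is called from initialize() rather than the constructor, the override in the derived class is reached normally.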
*** 306,328 ****
  bool GenCollectedHeap::must_clear_all_soft_refs() {
    return _gc_cause == GCCause::_metadata_GC_clear_soft_refs ||
           _gc_cause == GCCause::_wb_full_gc;
  }
  
- bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
-   if (!UseConcMarkSweepGC) {
-     return false;
-   }
- 
-   switch (cause) {
-     case GCCause::_gc_locker:           return GCLockerInvokesConcurrent;
-     case GCCause::_java_lang_system_gc:
-     case GCCause::_dcmd_gc_run:         return ExplicitGCInvokesConcurrent;
-     default:                            return false;
-   }
- }
- 
  void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
                                            bool is_tlab, bool run_verification, bool clear_soft_refs,
                                            bool restore_marks_for_biased_locking) {
    FormatBuffer<> title("Collect gen: %s", gen->short_name());
    GCTraceTime(Trace, gc, phases) t1(title);
--- 292,301 ----
*** 761,778 ****
  }
  
  // public collection interfaces
  
  void GenCollectedHeap::collect(GCCause::Cause cause) {
!   if (should_do_concurrent_full_gc(cause)) {
! #if INCLUDE_ALL_GCS
!     // Mostly concurrent full collection.
!     collect_mostly_concurrent(cause);
! #else  // INCLUDE_ALL_GCS
!     ShouldNotReachHere();
! #endif // INCLUDE_ALL_GCS
!   } else if (cause == GCCause::_wb_young_gc) {
      // Young collection for the WhiteBox API.
      collect(cause, YoungGen);
    } else {
  #ifdef ASSERT
    if (cause == GCCause::_scavenge_alot) {
--- 734,744 ----
  }
  
  // public collection interfaces
  
  void GenCollectedHeap::collect(GCCause::Cause cause) {
!   if (cause == GCCause::_wb_young_gc) {
      // Young collection for the WhiteBox API.
      collect(cause, YoungGen);
    } else {
  #ifdef ASSERT
    if (cause == GCCause::_scavenge_alot) {
--- 734,744 ----
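With the concurrent branch removed, collect(GCCause::Cause) above only distinguishes the WhiteBox young collection from the ordinary full path. One plausible way for a concurrent collector to regain its "mostly concurrent" behavior is to override the virtual collect() in its own heap subclass; the sketch below shows that dispatch shape with invented names (SerialLikeHeap, ConcurrentHeap, should_do_concurrent_full_gc) and is not taken from this patch.

  // Hypothetical dispatch sketch: the base heap keeps the simple cause
  // handling, a concurrent subclass intercepts causes that should trigger
  // a mostly-concurrent cycle. Names are invented for illustration.
  #include <cstdio>

  enum class Cause { wb_young_gc, java_lang_system_gc, gc_locker, allocation_failure };

  class SerialLikeHeap {
  public:
    virtual ~SerialLikeHeap() {}
    virtual void collect(Cause cause) {
      if (cause == Cause::wb_young_gc) {
        collect_young(cause);            // young collection for the WhiteBox API
      } else {
        collect_full(cause);             // ordinary stop-the-world full collection
      }
    }
  protected:
    void collect_young(Cause) { std::printf("young collection\n"); }
    void collect_full(Cause)  { std::printf("full collection\n"); }
  };

  class ConcurrentHeap : public SerialLikeHeap {
  public:
    void collect(Cause cause) override {
      if (should_do_concurrent_full_gc(cause)) {
        std::printf("mostly concurrent full collection\n");
      } else {
        SerialLikeHeap::collect(cause);  // fall back to the base behavior
      }
    }
  private:
    static bool should_do_concurrent_full_gc(Cause cause) {
      return cause == Cause::java_lang_system_gc || cause == Cause::gc_locker;
    }
  };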
*** 815,862 ****
                                             cause, max_generation);
      VMThread::execute(&op);
    }
  }
  
- #if INCLUDE_ALL_GCS
- bool GenCollectedHeap::create_cms_collector() {
- 
-   assert(_old_gen->kind() == Generation::ConcurrentMarkSweep,
-          "Unexpected generation kinds");
-   // Skip two header words in the block content verification
-   NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
-   assert(_gen_policy->is_concurrent_mark_sweep_policy(), "Unexpected policy type");
-   CMSCollector* collector =
-     new CMSCollector((ConcurrentMarkSweepGeneration*)_old_gen,
-                      _rem_set,
-                      _gen_policy->as_concurrent_mark_sweep_policy());
- 
-   if (collector == NULL || !collector->completed_initialization()) {
-     if (collector) {
-       delete collector;  // Be nice in embedded situation
-     }
-     vm_shutdown_during_initialization("Could not create CMS collector");
-     return false;
-   }
-   return true;  // success
- }
- 
- void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
-   assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");
- 
-   MutexLocker ml(Heap_lock);
-   // Read the GC counts while holding the Heap_lock
-   unsigned int full_gc_count_before = total_full_collections();
-   unsigned int gc_count_before      = total_collections();
-   {
-     MutexUnlocker mu(Heap_lock);
-     VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
-     VMThread::execute(&op);
-   }
- }
- #endif // INCLUDE_ALL_GCS
- 
  void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
    do_full_collection(clear_all_soft_refs, OldGen);
  }
  
  void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
--- 781,790 ----
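The removed collect_mostly_concurrent() above also documents a useful idiom: read the collection counters while holding the heap lock, then release the lock before handing the request to the VM thread, so the operation can later detect whether another collection already happened. A small standalone sketch of that "snapshot under lock, submit outside the lock" pattern follows, using std::mutex rather than HotSpot's Heap_lock; it is an illustration, not the patch's code.

  // Sketch: snapshot GC counters under a lock, then drop the lock before
  // executing the request, so the executing side can skip the work if a
  // collection already completed in between. Standard C++ stand-ins only.
  #include <cstdio>
  #include <mutex>

  class Heap {
  public:
    void request_full_collection() {
      unsigned int gc_count_before;
      unsigned int full_gc_count_before;
      {
        // Read the counters while holding the lock (like Heap_lock above) ...
        std::lock_guard<std::mutex> lock(_heap_lock);
        gc_count_before      = _total_collections;
        full_gc_count_before = _total_full_collections;
      }
      // ... and submit the operation without holding it, so the executing
      // side can take the lock itself.
      execute_collection(gc_count_before, full_gc_count_before);
    }

  private:
    void execute_collection(unsigned int gc_before, unsigned int full_before) {
      std::lock_guard<std::mutex> lock(_heap_lock);
      if (_total_full_collections != full_before) {
        std::printf("skipped: a full collection already happened\n");
        return;
      }
      ++_total_collections;
      ++_total_full_collections;
      std::printf("collected (gc_count_before=%u)\n", gc_before);
    }

    std::mutex _heap_lock;
    unsigned int _total_collections = 0;
    unsigned int _total_full_collections = 0;
  };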
*** 1093,1103 ****
    _young_gen->save_marks();
    _old_gen->save_marks();
  }
  
  GenCollectedHeap* GenCollectedHeap::heap() {
!   CollectedHeap* heap = Universe::heap();
    assert(heap != NULL, "Uninitialized access to GenCollectedHeap::heap()");
    assert(heap->kind() == CollectedHeap::GenCollectedHeap, "Not a GenCollectedHeap");
    return (GenCollectedHeap*)heap;
  }
  
--- 1021,1031 ----
    _young_gen->save_marks();
    _old_gen->save_marks();
  }
  
  GenCollectedHeap* GenCollectedHeap::heap() {
!   CollectedHeap* heap = GC::gc()->heap();
    assert(heap != NULL, "Uninitialized access to GenCollectedHeap::heap()");
    assert(heap->kind() == CollectedHeap::GenCollectedHeap, "Not a GenCollectedHeap");
    return (GenCollectedHeap*)heap;
  }
  
--- 1021,1031 ----
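The heap() accessor above now fetches the global CollectedHeap from what appears to be a GC access point introduced by this patch (GC::gc()) instead of Universe::heap(); the shape of the accessor itself is unchanged: get the global heap, assert its kind, downcast. A self-contained sketch of that checked-downcast accessor pattern is below, with simplified placeholder classes (GC, CollectedHeap, GenHeap), not the real HotSpot ones.

  // Sketch of a checked-downcast heap accessor: a single global access point
  // hands out the CollectedHeap, and each heap class verifies the dynamic
  // kind before casting. Placeholder types, not HotSpot's.
  #include <cassert>

  class CollectedHeap {
  public:
    enum Kind { SerialKind, GenKind };
    explicit CollectedHeap(Kind k) : _kind(k) {}
    virtual ~CollectedHeap() {}
    Kind kind() const { return _kind; }
  private:
    Kind _kind;
  };

  class GC {
  public:
    static GC* gc() { return &_the_gc; }            // global GC access point
    CollectedHeap* heap() const { return _heap; }
    void set_heap(CollectedHeap* h) { _heap = h; }
  private:
    CollectedHeap* _heap = nullptr;
    static GC _the_gc;
  };
  GC GC::_the_gc;

  class GenHeap : public CollectedHeap {
  public:
    GenHeap() : CollectedHeap(GenKind) {}
    // Mirrors the accessor above: assert the kind, then downcast.
    static GenHeap* heap() {
      CollectedHeap* h = GC::gc()->heap();
      assert(h != nullptr && "Uninitialized access to GenHeap::heap()");
      assert(h->kind() == CollectedHeap::GenKind && "Not a GenHeap");
      return static_cast<GenHeap*>(h);
    }
  };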
*** 1127,1161 ****
  void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
    if (workers() != NULL) {
      workers()->threads_do(tc);
    }
- #if INCLUDE_ALL_GCS
-   if (UseConcMarkSweepGC) {
-     ConcurrentMarkSweepThread::threads_do(tc);
-   }
- #endif // INCLUDE_ALL_GCS
  }
  
  void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
- #if INCLUDE_ALL_GCS
-   if (UseConcMarkSweepGC) {
-     workers()->print_worker_threads_on(st);
-     ConcurrentMarkSweepThread::print_all_on(st);
-   }
- #endif // INCLUDE_ALL_GCS
  }
  
  void GenCollectedHeap::print_on_error(outputStream* st) const {
    this->CollectedHeap::print_on_error(st);
- 
- #if INCLUDE_ALL_GCS
-   if (UseConcMarkSweepGC) {
-     st->cr();
-     CMSCollector::print_on_error(st);
-   }
- #endif // INCLUDE_ALL_GCS
  }
  
  void GenCollectedHeap::print_tracing_info() const {
    if (TraceYoungGenTime) {
      _young_gen->print_summary_info();
--- 1055,1071 ----
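gc_threads_do() above is now just the worker-gang half of a closure-based iteration: callers supply a ThreadClosure and the heap applies it to each GC thread it owns, which is what lets the CMS-thread handling move out of this file. A small standalone sketch of that closure-iteration pattern follows; ThreadClosure, GCThread and SmallHeap here are simplified stand-ins for illustration.

  // Sketch of closure-based thread iteration: the heap owns the loop, the
  // caller supplies the per-thread action. Simplified stand-in types.
  #include <cstdio>
  #include <string>
  #include <vector>

  struct GCThread { std::string name; };

  class ThreadClosure {
  public:
    virtual ~ThreadClosure() {}
    virtual void do_thread(GCThread* t) = 0;
  };

  class SmallHeap {
  public:
    explicit SmallHeap(size_t n) {
      for (size_t i = 0; i < n; i++) {
        _workers.push_back(GCThread{"GC Thread#" + std::to_string(i)});
      }
    }
    // Apply the closure to every GC thread this heap owns.
    void gc_threads_do(ThreadClosure* tc) {
      for (GCThread& t : _workers) {
        tc->do_thread(&t);
      }
    }
  private:
    std::vector<GCThread> _workers;
  };

  // Example closure in the spirit of print_gc_threads_on(): print each name.
  class PrintClosure : public ThreadClosure {
  public:
    void do_thread(GCThread* t) override { std::printf("%s\n", t->name.c_str()); }
  };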
*** 1183,1193 ****
  };
  
  void GenCollectedHeap::gc_prologue(bool full) {
    assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
  
-   always_do_update_barrier = false;
    // Fill TLAB's and such
    CollectedHeap::accumulate_statistics_all_tlabs();
    ensure_parsability(true);   // retire TLABs
  
    // Walk generations
--- 1093,1102 ----
*** 1222,1232 ****
    }
  
    MetaspaceCounters::update_performance_counters();
    CompressedClassSpaceCounters::update_performance_counters();
  
-   always_do_update_barrier = UseConcMarkSweepGC;
  };
  
  #ifndef PRODUCT
  class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
   private:
--- 1131,1140 ----
*** 1303,1315 ****
                          ". returning zero instead.", retVal);
      return 0;
    }
    return retVal;
  }
- 
- void GenCollectedHeap::stop() {
- #if INCLUDE_ALL_GCS
-   if (UseConcMarkSweepGC) {
-     ConcurrentMarkSweepThread::cmst()->stop();
-   }
- #endif
- }
--- 1211,1215 ----