
src/share/vm/gc_implementation/parNew/parNewGeneration.cpp

rev 7474 : imported patch 8066566

*** 881,891 ****
  }
  
  // A Generation that does parallel young-gen collection.
  
! void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set, ParNewTracer& gc_tracer) {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.
  
    remove_forwarding_pointers();
    if (PrintGCDetails) {
--- 881,891 ----
  }
  
  // A Generation that does parallel young-gen collection.
  
! void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set) {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.
  
    remove_forwarding_pointers();
    if (PrintGCDetails) {
*** 897,910 ****
    gch->set_incremental_collection_failed();
    // Inform the next generation that a promotion failure occurred.
    _next_gen->promotion_failure_occurred();
  
    // Trace promotion failure in the parallel GC threads
!   thread_state_set.trace_promotion_failed(gc_tracer);
    // Single threaded code may have reported promotion failure to the global state
    if (_promotion_failed_info.has_failed()) {
!     gc_tracer.report_promotion_failed(_promotion_failed_info);
    }
    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
--- 897,910 ----
    gch->set_incremental_collection_failed();
    // Inform the next generation that a promotion failure occurred.
    _next_gen->promotion_failure_occurred();
  
    // Trace promotion failure in the parallel GC threads
!   thread_state_set.trace_promotion_failed(_gc_tracer);
    // Single threaded code may have reported promotion failure to the global state
    if (_promotion_failed_info.has_failed()) {
!     _gc_tracer.report_promotion_failed(_promotion_failed_info);
    }
    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
*** 939,960 ****
      gch->set_incremental_collection_failed(); // slight lie, in that we did not even attempt one
      return;
    }
    assert(to()->is_empty(), "Else not collection_attempt_is_safe");
  
!   ParNewTracer gc_tracer;
!   gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
!   gch->trace_heap_before_gc(&gc_tracer);
  
    init_assuming_no_promotion_failure();
  
    if (UseAdaptiveSizePolicy) {
      set_survivor_overflow(false);
      size_policy->minor_collection_begin();
    }
  
!   GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, gc_tracer.gc_id());
    // Capture heap used before collection (for printing).
    size_t gch_prev_used = gch->used();
  
    SpecializationStats::clear();
--- 939,959 ----
      gch->set_incremental_collection_failed(); // slight lie, in that we did not even attempt one
      return;
    }
    assert(to()->is_empty(), "Else not collection_attempt_is_safe");
  
!   _gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
!   gch->trace_heap_before_gc(&_gc_tracer);
  
    init_assuming_no_promotion_failure();
  
    if (UseAdaptiveSizePolicy) {
      set_survivor_overflow(false);
      size_policy->minor_collection_begin();
    }
  
!   GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, _gc_tracer.gc_id());
    // Capture heap used before collection (for printing).
    size_t gch_prev_used = gch->used();
  
    SpecializationStats::clear();
*** 992,1002 ****
    thread_state_set.reset(0 /* Bad value in debug if not reset */, promotion_failed());
  
    // Trace and reset failed promotion info.
    if (promotion_failed()) {
!     thread_state_set.trace_promotion_failed(gc_tracer);
    }
  
    // Process (weak) reference objects found during scavenge.
    ReferenceProcessor* rp = ref_processor();
    IsAliveClosure is_alive(this);
--- 991,1001 ----
    thread_state_set.reset(0 /* Bad value in debug if not reset */, promotion_failed());
  
    // Trace and reset failed promotion info.
    if (promotion_failed()) {
!     thread_state_set.trace_promotion_failed(_gc_tracer);
    }
  
    // Process (weak) reference objects found during scavenge.
    ReferenceProcessor* rp = ref_processor();
    IsAliveClosure is_alive(this);
*** 1013,1032 ****
    ReferenceProcessorStats stats;
    if (rp->processing_is_mt()) {
      ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
      stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                                &evacuate_followers, &task_executor,
!                                               _gc_timer, gc_tracer.gc_id());
    } else {
      thread_state_set.flush();
      gch->set_par_threads(0); // 0 ==> non-parallel.
      gch->save_marks();
      stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                                &evacuate_followers, NULL,
!                                               _gc_timer, gc_tracer.gc_id());
    }
!   gc_tracer.report_gc_reference_stats(stats);
    if (!promotion_failed()) {
      // Swap the survivor spaces.
      eden()->clear(SpaceDecorator::Mangle);
      from()->clear(SpaceDecorator::Mangle);
      if (ZapUnusedHeapArea) {
--- 1012,1031 ----
    ReferenceProcessorStats stats;
    if (rp->processing_is_mt()) {
      ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
      stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                                &evacuate_followers, &task_executor,
!                                               _gc_timer, _gc_tracer.gc_id());
    } else {
      thread_state_set.flush();
      gch->set_par_threads(0); // 0 ==> non-parallel.
      gch->save_marks();
      stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                                &evacuate_followers, NULL,
!                                               _gc_timer, _gc_tracer.gc_id());
    }
!   _gc_tracer.report_gc_reference_stats(stats);
    if (!promotion_failed()) {
      // Swap the survivor spaces.
      eden()->clear(SpaceDecorator::Mangle);
      from()->clear(SpaceDecorator::Mangle);
      if (ZapUnusedHeapArea) {
*** 1047,1057 ****
      assert(to()->is_empty(), "to space should be empty now");
  
      adjust_desired_tenuring_threshold();
    } else {
!     handle_promotion_failed(gch, thread_state_set, gc_tracer);
    }
    // set new iteration safe limit for the survivor spaces
    from()->set_concurrent_iteration_safe_limit(from()->top());
    to()->set_concurrent_iteration_safe_limit(to()->top());
--- 1046,1056 ----
      assert(to()->is_empty(), "to space should be empty now");
  
      adjust_desired_tenuring_threshold();
    } else {
!     handle_promotion_failed(gch, thread_state_set);
    }
    // set new iteration safe limit for the survivor spaces
    from()->set_concurrent_iteration_safe_limit(from()->top());
    to()->set_concurrent_iteration_safe_limit(to()->top());
*** 1086,1101 ****
    } else {
      rp->enqueue_discovered_references(NULL);
    }
    rp->verify_no_references_recorded();
  
!   gch->trace_heap_after_gc(&gc_tracer);
!   gc_tracer.report_tenuring_threshold(tenuring_threshold());
  
    _gc_timer->register_gc_end();
  
!   gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
  }
  
  static int sum;
  void ParNewGeneration::waste_some_time() {
    for (int i = 0; i < 100; i++) {
--- 1085,1100 ----
    } else {
      rp->enqueue_discovered_references(NULL);
    }
    rp->verify_no_references_recorded();
  
!   gch->trace_heap_after_gc(&_gc_tracer);
!   _gc_tracer.report_tenuring_threshold(tenuring_threshold());
  
    _gc_timer->register_gc_end();
  
!   _gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
  }
  
  static int sum;
  void ParNewGeneration::waste_some_time() {
    for (int i = 0; i < 100; i++) {
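
Note: taken together, the hunks above replace the local gc_tracer created in ParNewGeneration::collect() with a _gc_tracer member of the generation, so handle_promotion_failed() no longer needs the tracer passed as an argument. The matching declaration change in parNewGeneration.hpp is not part of this page; a minimal sketch of what it implies, with everything outside the diffed names assumed for illustration:

// Sketch only -- parNewGeneration.hpp is not shown in this webrev excerpt.
class ParNewGeneration : public DefNewGeneration {
 private:
  // The tracer is now owned by the generation rather than being a local in
  // collect(), so helpers such as handle_promotion_failed() can reach it
  // without an extra parameter.
  ParNewTracer _gc_tracer;

  // Tracer parameter dropped from the old three-argument signature.
  void handle_promotion_failed(GenCollectedHeap* gch,
                               ParScanThreadStateSet& thread_state_set);
  // ... remaining members unchanged ...
};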