
src/share/vm/memory/defNewGeneration.cpp

 935 #ifdef ASSERT
 936     // It is possible that incremental_collection_failed() == true
 937     // here, because an attempted scavenge did not succeed. The policy
 938     // is normally expected to cause a full collection which should
 939     // clear that condition, so we should not be here twice in a row
 940     // with incremental_collection_failed() == true without having done
 941     // a full collection in between.
 942     if (!seen_incremental_collection_failed &&
 943         gch->incremental_collection_failed()) {
 944       if (Verbose && PrintGCDetails) {
 945         gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
 946                             GCCause::to_string(gch->gc_cause()));
 947       }
 948       seen_incremental_collection_failed = true;
 949     } else if (seen_incremental_collection_failed) {
 950       if (Verbose && PrintGCDetails) {
 951         gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
 952                             GCCause::to_string(gch->gc_cause()));
 953       }
 954       assert(gch->gc_cause() == GCCause::_scavenge_alot ||
 955              (gch->gc_cause() == GCCause::_java_lang_system_gc && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
 956              !gch->incremental_collection_failed(),
 957              "Twice in a row");
 958       seen_incremental_collection_failed = false;
 959     }
 960 #endif // ASSERT
 961   }
 962 
 963   if (ZapUnusedHeapArea) {
 964     eden()->check_mangled_unused_area_complete();
 965     from()->check_mangled_unused_area_complete();
 966     to()->check_mangled_unused_area_complete();
 967   }
 968 
 969   if (!CleanChunkPoolAsync) {
 970     Chunk::clean_chunk_pool();
 971   }
 972 
 973   // update the generation and space performance counters
 974   update_counters();
 975   gch->collector_policy()->counters()->update_counters();
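
The comment and assert above encode an invariant: a failed scavenge is expected to be followed by a full collection that clears incremental_collection_failed(), so the flag should not still be set the next time this epilogue runs. The tolerated exceptions are ScavengeALot stress mode and an explicit System.gc() that CMS turned into a concurrent cycle, which does not clear the flag synchronously. Below is a self-contained restatement of that tolerance condition from the old version of line 955, using sketch-local names rather than HotSpot's types, purely for readability:

// Self-contained restatement of the tolerance condition in the assert above
// (old version of line 955); names are local to this sketch, not HotSpot's.
enum Cause { scavenge_alot, java_lang_system_gc, other };

struct EpilogueState {
  Cause cause;                          // what triggered the collection
  bool  incremental_collection_failed;  // still set from the failed scavenge?
  bool  use_cms;                        // -XX:+UseConcMarkSweepGC
  bool  explicit_gc_invokes_concurrent; // -XX:+ExplicitGCInvokesConcurrent
};

// Seeing the failure flag set in two consecutive epilogues is tolerated only
// when the expected full collection was legitimately skipped or deferred.
static bool twice_in_a_row_ok(const EpilogueState& s) {
  return s.cause == scavenge_alot                        // stress mode forces extra scavenges
      || (s.cause == java_lang_system_gc &&              // System.gc() ...
          s.use_cms && s.explicit_gc_invokes_concurrent) // ... deferred to a concurrent CMS cycle
      || !s.incremental_collection_failed;               // or a full collection already cleared the flag
}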




 935 #ifdef ASSERT
 936     // It is possible that incremental_collection_failed() == true
 937     // here, because an attempted scavenge did not succeed. The policy
 938     // is normally expected to cause a full collection which should
 939     // clear that condition, so we should not be here twice in a row
 940     // with incremental_collection_failed() == true without having done
 941     // a full collection in between.
 942     if (!seen_incremental_collection_failed &&
 943         gch->incremental_collection_failed()) {
 944       if (Verbose && PrintGCDetails) {
 945         gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
 946                             GCCause::to_string(gch->gc_cause()));
 947       }
 948       seen_incremental_collection_failed = true;
 949     } else if (seen_incremental_collection_failed) {
 950       if (Verbose && PrintGCDetails) {
 951         gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
 952                             GCCause::to_string(gch->gc_cause()));
 953       }
 954       assert(gch->gc_cause() == GCCause::_scavenge_alot ||
 955              (GCCause::is_user_requested_gc(gch->gc_cause()) && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
 956              !gch->incremental_collection_failed(),
 957              "Twice in a row");
 958       seen_incremental_collection_failed = false;
 959     }
 960 #endif // ASSERT
 961   }
 962 
 963   if (ZapUnusedHeapArea) {
 964     eden()->check_mangled_unused_area_complete();
 965     from()->check_mangled_unused_area_complete();
 966     to()->check_mangled_unused_area_complete();
 967   }
 968 
 969   if (!CleanChunkPoolAsync) {
 970     Chunk::clean_chunk_pool();
 971   }
 972 
 973   // update the generation and space performance counters
 974   update_counters();
 975   gch->collector_policy()->counters()->update_counters();
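
The only change between the two versions above is on line 955: the direct comparison against GCCause::_java_lang_system_gc is widened to GCCause::is_user_requested_gc(), so the assert also tolerates other explicitly user-requested causes (for example a collection started via the jcmd GC.run diagnostic command) when CMS hands them to a concurrent cycle. Below is a minimal, self-contained sketch of what such a predicate is assumed to cover; the authoritative definition lives in gcCause.hpp and may list additional causes:

// Sketch (assumption): a user-requested GC is one asked for explicitly by the
// user rather than triggered by the heap itself. Enum and predicate are local
// to this sketch, mirroring only the causes relevant to the assert above.
enum Cause {
  _java_lang_system_gc,   // explicit System.gc() from Java code
  _dcmd_gc_run,           // jcmd <pid> GC.run diagnostic command
  _scavenge_alot,         // -XX:+ScavengeALot stress mode
  _allocation_failure     // ordinary allocation-triggered GC
};

static bool is_user_requested_gc(Cause cause) {
  return cause == _java_lang_system_gc || cause == _dcmd_gc_run;
}

With this predicate, a GC triggered by jcmd GC.run under -XX:+UseConcMarkSweepGC -XX:+ExplicitGCInvokesConcurrent no longer trips the "Twice in a row" assert, matching the existing treatment of System.gc().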

