
src/share/vm/gc/serial/defNewGeneration.cpp

Old version:

 566   _gc_timer->register_gc_start();
 567   DefNewTracer gc_tracer;
 568   gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
 569 
 570   _old_gen = gch->old_gen();
 571 
 572   // If the next generation is too full to accommodate promotion
 573   // from this generation, pass on collection; let the next generation
 574   // do it.
 575   if (!collection_attempt_is_safe()) {
 576     if (Verbose && PrintGCDetails) {
 577       gclog_or_tty->print(" :: Collection attempt not safe :: ");
 578     }
 579     gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
 580     return;
 581   }
 582   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
 583 
 584   init_assuming_no_promotion_failure();
 585 
 586   GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, gc_tracer.gc_id());
 587   // Capture heap used before collection (for printing).
 588   size_t gch_prev_used = gch->used();
 589 
 590   gch->trace_heap_before_gc(&gc_tracer);
 591 
 592   // These can be shared for all code paths
 593   IsAliveClosure is_alive(this);
 594   ScanWeakRefClosure scan_weak_ref(this);
 595 
 596   age_table()->clear();
 597   to()->clear(SpaceDecorator::Mangle);
 598 
 599   gch->rem_set()->prepare_for_younger_refs_iterate(false);
 600 
 601   assert(gch->no_allocs_since_save_marks(),
 602          "save marks have not been newly set.");
 603 
 604   // Not very pretty.
 605   CollectorPolicy* cp = gch->collector_policy();
 606 
[lines 607-628 not shown]
 629 
 630     gch->gen_process_roots(&srs,
 631                            GenCollectedHeap::YoungGen,
 632                            true,  // Process younger gens, if any,
 633                                   // as strong roots.
 634                            GenCollectedHeap::SO_ScavengeCodeCache,
 635                            GenCollectedHeap::StrongAndWeakRoots,
 636                            &fsc_with_no_gc_barrier,
 637                            &fsc_with_gc_barrier,
 638                            &cld_scan_closure);
 639   }
 640 
 641   // "evacuate followers".
 642   evacuate_followers.do_void();
 643 
 644   FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
 645   ReferenceProcessor* rp = ref_processor();
 646   rp->setup_policy(clear_all_soft_refs);
 647   const ReferenceProcessorStats& stats =
 648   rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
 649                                     NULL, _gc_timer, gc_tracer.gc_id());
 650   gc_tracer.report_gc_reference_stats(stats);
 651 
 652   if (!_promotion_failed) {
 653     // Swap the survivor spaces.
 654     eden()->clear(SpaceDecorator::Mangle);
 655     from()->clear(SpaceDecorator::Mangle);
 656     if (ZapUnusedHeapArea) {
 657       // This is now done here because of the piecemeal mangling which
 658       // can check for valid mangling at intermediate points in the
 659       // collection(s).  When a young collection fails to collect
 660       // sufficient space, resizing of the young generation can occur
 661       // and redistribute the spaces in the young generation.  Mangle
 662       // here so that unzapped regions don't get distributed to
 663       // other spaces.
 664       to()->mangle_unused_area();
 665     }
 666     swap_spaces();
 667 
 668     assert(to()->is_empty(), "to space should be empty now");
 669 

New version (identical except that the trailing gc_tracer.gc_id() argument is dropped from the GCTraceTime constructor at line 586 and from rp->process_discovered_references() at line 649):

 566   _gc_timer->register_gc_start();
 567   DefNewTracer gc_tracer;
 568   gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
 569 
 570   _old_gen = gch->old_gen();
 571 
 572   // If the next generation is too full to accommodate promotion
 573   // from this generation, pass on collection; let the next generation
 574   // do it.
 575   if (!collection_attempt_is_safe()) {
 576     if (Verbose && PrintGCDetails) {
 577       gclog_or_tty->print(" :: Collection attempt not safe :: ");
 578     }
 579     gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
 580     return;
 581   }
 582   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
 583 
 584   init_assuming_no_promotion_failure();
 585 
 586   GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
 587   // Capture heap used before collection (for printing).
 588   size_t gch_prev_used = gch->used();
 589 
 590   gch->trace_heap_before_gc(&gc_tracer);
 591 
 592   // These can be shared for all code paths
 593   IsAliveClosure is_alive(this);
 594   ScanWeakRefClosure scan_weak_ref(this);
 595 
 596   age_table()->clear();
 597   to()->clear(SpaceDecorator::Mangle);
 598 
 599   gch->rem_set()->prepare_for_younger_refs_iterate(false);
 600 
 601   assert(gch->no_allocs_since_save_marks(),
 602          "save marks have not been newly set.");
 603 
 604   // Not very pretty.
 605   CollectorPolicy* cp = gch->collector_policy();
 606 
[lines 607-628 not shown]
 629 
 630     gch->gen_process_roots(&srs,
 631                            GenCollectedHeap::YoungGen,
 632                            true,  // Process younger gens, if any,
 633                                   // as strong roots.
 634                            GenCollectedHeap::SO_ScavengeCodeCache,
 635                            GenCollectedHeap::StrongAndWeakRoots,
 636                            &fsc_with_no_gc_barrier,
 637                            &fsc_with_gc_barrier,
 638                            &cld_scan_closure);
 639   }
 640 
 641   // "evacuate followers".
 642   evacuate_followers.do_void();
 643 
 644   FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
 645   ReferenceProcessor* rp = ref_processor();
 646   rp->setup_policy(clear_all_soft_refs);
 647   const ReferenceProcessorStats& stats =
 648   rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
 649                                     NULL, _gc_timer);
 650   gc_tracer.report_gc_reference_stats(stats);
 651 
 652   if (!_promotion_failed) {
 653     // Swap the survivor spaces.
 654     eden()->clear(SpaceDecorator::Mangle);
 655     from()->clear(SpaceDecorator::Mangle);
 656     if (ZapUnusedHeapArea) {
 657       // This is now done here because of the piecemeal mangling which
 658       // can check for valid mangling at intermediate points in the
 659       // collection(s).  When a young collection fails to collect
 660       // sufficient space, resizing of the young generation can occur
 661       // and redistribute the spaces in the young generation.  Mangle
 662       // here so that unzapped regions don't get distributed to
 663       // other spaces.
 664       to()->mangle_unused_area();
 665     }
 666     swap_spaces();
 667 
 668     assert(to()->is_empty(), "to space should be empty now");
 669 
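
A note on one of the changed calls: GCTraceTime (line 586) is a stack-allocated scoped timer, so construction records the start time and the destructor, which runs when the object leaves scope at the end of the collection, reports the elapsed time. A minimal sketch of that RAII pattern, with hypothetical names rather than the HotSpot class:

#include <chrono>
#include <cstdio>

// Hypothetical scoped timer sketching the RAII pattern behind GCTraceTime:
// timing starts in the constructor; the destructor reports the elapsed time
// when the object leaves scope.
class ScopedTraceTime {
  const char* _title;
  bool _doit;
  std::chrono::steady_clock::time_point _start;
 public:
  ScopedTraceTime(const char* title, bool doit)
    : _title(title), _doit(doit), _start(std::chrono::steady_clock::now()) {}
  ~ScopedTraceTime() {
    if (_doit) {
      auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                    std::chrono::steady_clock::now() - _start).count();
      std::printf("[%s, %.3f secs]\n", _title, (double)us / 1e6);
    }
  }
};

int main() {
  ScopedTraceTime t1("GC (Allocation Failure)", true /* printing enabled */);
  // ... collection work runs here; t1 reports when it goes out of scope ...
  return 0;
}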

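On the success path above, eden and from-space are cleared and swap_spaces() exchanges the roles of the two survivor spaces: the space just scavenged into becomes from() and an empty space becomes to(), which is what the assert at line 668 checks. A minimal sketch of that survivor-swap idiom, using hypothetical Space/YoungGen types rather than DefNewGeneration's actual fields:

#include <cassert>
#include <utility>

// Hypothetical survivor-space bookkeeping sketching the idiom behind
// swap_spaces(): survivors were copied into to-space and from-space was
// cleared, so the two spaces simply trade roles.
struct Space {
  bool is_empty;
};

struct YoungGen {
  Space* _from_space;
  Space* _to_space;
  void swap_spaces() { std::swap(_from_space, _to_space); }
};

int main() {
  Space survivors{false};  // just evacuated into; holds live objects
  Space cleared{true};     // evacuated out of, then cleared
  YoungGen gen{&cleared, &survivors};  // from = cleared, to = survivors
  gen.swap_spaces();
  assert(gen._to_space->is_empty);     // "to space should be empty now"
  assert(!gen._from_space->is_empty);  // survivors now sit in from-space
  return 0;
}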

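For context on the reference-processing call at lines 647-649: process_discovered_references() drives the discovered java.lang.ref.Reference objects through closures, with is_alive testing whether a referent survived the scavenge, keep_alive forwarding referents that must stay live, and evacuate_followers draining whatever keep_alive copied. A heavily simplified sketch of that closure protocol, with hypothetical types (the real ReferenceProcessor also handles soft-reference policy, enqueueing, and the distinct reference kinds):

#include <vector>

struct Object;

// Hypothetical closure interfaces mirroring the shape of the call above.
struct BoolObjectClosure { virtual bool do_object_b(Object* obj) = 0; };
struct OopClosure        { virtual void do_oop(Object** slot) = 0; };
struct VoidClosure       { virtual void do_void() = 0; };

// Simplified loop: live referents get their slots forwarded via keep_alive;
// dead referents are cleared; the complete closure then drains any objects
// that keep_alive copied (cf. evacuate_followers.do_void()).
void process_discovered(std::vector<Object**>& discovered,
                        BoolObjectClosure* is_alive,
                        OopClosure* keep_alive,
                        VoidClosure* complete) {
  for (Object** slot : discovered) {
    if (is_alive->do_object_b(*slot)) {
      keep_alive->do_oop(slot);  // update slot to the referent's new copy
    } else {
      *slot = nullptr;           // referent died: clear the reference
    }
  }
  complete->do_void();
}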