src/share/vm/memory/defNewGeneration.cpp

 534       result = from()->allocate(size);
 535     } else if (PrintGC && Verbose) {
 536       gclog_or_tty->print_cr("  Heap_lock is not owned by self");
 537     }
 538   } else if (PrintGC && Verbose) {
 539     gclog_or_tty->print_cr("  should_allocate_from_space: NOT");
 540   }
 541   if (PrintGC && Verbose) {
 542     gclog_or_tty->print_cr("  returns %s", result == NULL ? "NULL" : "object");
 543   }
 544   return result;
 545 }
 546 
 547 HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
 548                                                 bool   is_tlab,
 549                                                 bool   parallel) {
 550   // We don't attempt to expand the young generation (but perhaps we should.)
 551   return allocate(size, is_tlab);
 552 }
 553 
 554 void DefNewGeneration::adjust_desired_tenuring_threshold() {
 555   // Set the desired survivor size to half the real survivor space
 556   _tenuring_threshold =
 557     age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
 558 }
 559 
 560 void DefNewGeneration::collect(bool   full,
 561                                bool   clear_all_soft_refs,
 562                                size_t size,
 563                                bool   is_tlab) {
 564   assert(full || size > 0, "otherwise we don't want to collect");
 565 
 566   GenCollectedHeap* gch = GenCollectedHeap::heap();
 567 
 568   _gc_timer->register_gc_start();
 569   DefNewTracer gc_tracer;
 570   gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
 571 
 572   _next_gen = gch->next_gen(this);
 573 
 574   // If the next generation is too full to accommodate promotion
 575   // from this generation, pass on collection; let the next generation
 576   // do it.
 577   if (!collection_attempt_is_safe()) {


 647   gc_tracer.report_gc_reference_stats(stats);
 648 
 649   if (!_promotion_failed) {
 650     // Swap the survivor spaces.
 651     eden()->clear(SpaceDecorator::Mangle);
 652     from()->clear(SpaceDecorator::Mangle);
 653     if (ZapUnusedHeapArea) {
 654       // This is now done here because of the piece-meal mangling which
 655       // can check for valid mangling at intermediate points in the
 656       // collection(s).  When a minor collection fails to collect
 657       // sufficient space, resizing of the young generation can occur
 658       // and redistribute the spaces in the young generation.  Mangle
 659       // here so that unzapped regions don't get distributed to
 660       // other spaces.
 661       to()->mangle_unused_area();
 662     }
 663     swap_spaces();
 664 
 665     assert(to()->is_empty(), "to space should be empty now");
 666 
 667     adjust_desired_tenuring_threshold();
 668 
 669     // A successful scavenge should restart the GC time limit count, which is
 670     // for full GCs.
 671     AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
 672     size_policy->reset_gc_overhead_limit_count();
 673     if (PrintGC && !PrintGCDetails) {
 674       gch->print_heap_change(gch_prev_used);
 675     }
 676     assert(!gch->incremental_collection_failed(), "Should be clear");
 677   } else {
 678     assert(_promo_failure_scan_stack.is_empty(), "post condition");
 679     _promo_failure_scan_stack.clear(true); // Clear cached segments.
 680 
 681     remove_forwarding_pointers();
 682     if (PrintGCDetails) {
 683       gclog_or_tty->print(" (promotion failed) ");
 684     }
 685     // Add to-space to the list of spaces to compact
 686     // when a promotion failure has occurred.  In that
 687     // case there can be live objects in to-space




 534       result = from()->allocate(size);
 535     } else if (PrintGC && Verbose) {
 536       gclog_or_tty->print_cr("  Heap_lock is not owned by self");
 537     }
 538   } else if (PrintGC && Verbose) {
 539     gclog_or_tty->print_cr("  should_allocate_from_space: NOT");
 540   }
 541   if (PrintGC && Verbose) {
 542     gclog_or_tty->print_cr("  returns %s", result == NULL ? "NULL" : "object");
 543   }
 544   return result;
 545 }
 546 
 547 HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
 548                                                 bool   is_tlab,
 549                                                 bool   parallel) {
 550   // We don't attempt to expand the young generation (but perhaps we should.)
 551   return allocate(size, is_tlab);
 552 }
 553 
 554 void DefNewGeneration::adjust_desired_tenuring_threshold(GCTracer &tracer) {
 555   // Set the desired survivor size to half the real survivor space
 556   _tenuring_threshold =
 557     age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize, tracer);
 558 }
 559 
 560 void DefNewGeneration::collect(bool   full,
 561                                bool   clear_all_soft_refs,
 562                                size_t size,
 563                                bool   is_tlab) {
 564   assert(full || size > 0, "otherwise we don't want to collect");
 565 
 566   GenCollectedHeap* gch = GenCollectedHeap::heap();
 567 
 568   _gc_timer->register_gc_start();
 569   DefNewTracer gc_tracer;
 570   gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
 571 
 572   _next_gen = gch->next_gen(this);
 573 
 574   // If the next generation is too full to accommodate promotion
 575   // from this generation, pass on collection; let the next generation
 576   // do it.
 577   if (!collection_attempt_is_safe()) {


 647   gc_tracer.report_gc_reference_stats(stats);
 648 
 649   if (!_promotion_failed) {
 650     // Swap the survivor spaces.
 651     eden()->clear(SpaceDecorator::Mangle);
 652     from()->clear(SpaceDecorator::Mangle);
 653     if (ZapUnusedHeapArea) {
 654       // This is now done here because of the piece-meal mangling which
 655       // can check for valid mangling at intermediate points in the
 656       // collection(s).  When a minor collection fails to collect
 657       // sufficient space, resizing of the young generation can occur
 658       // and redistribute the spaces in the young generation.  Mangle
 659       // here so that unzapped regions don't get distributed to
 660       // other spaces.
 661       to()->mangle_unused_area();
 662     }
 663     swap_spaces();
 664 
 665     assert(to()->is_empty(), "to space should be empty now");
 666 
 667     adjust_desired_tenuring_threshold(gc_tracer);
 668 
 669     // A successful scavenge should restart the GC time limit count, which is
 670     // for full GCs.
 671     AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
 672     size_policy->reset_gc_overhead_limit_count();
 673     if (PrintGC && !PrintGCDetails) {
 674       gch->print_heap_change(gch_prev_used);
 675     }
 676     assert(!gch->incremental_collection_failed(), "Should be clear");
 677   } else {
 678     assert(_promo_failure_scan_stack.is_empty(), "post condition");
 679     _promo_failure_scan_stack.clear(true); // Clear cached segments.
 680 
 681     remove_forwarding_pointers();
 682     if (PrintGCDetails) {
 683       gclog_or_tty->print(" (promotion failed) ");
 684     }
 685     // Add to-space to the list of spaces to compact
 686     // when a promotion failure has occurred.  In that
 687     // case there can be live objects in to-space
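
[Note: ageTable.cpp is not part of this excerpt, so how the new tracer argument is consumed inside compute_tenuring_threshold() is not shown. The sketch below follows the usual HotSpot threshold calculation (tenure at the first age whose accumulated survivor bytes exceed TargetSurvivorRatio percent of the survivor capacity) and marks the assumed reporting hook as a commented placeholder; it is illustrative only, not part of this patch.]

    // Sketch only -- not part of this webrev.  One plausible shape for the
    // updated ageTable::compute_tenuring_threshold() that the calls above assume.
    uint ageTable::compute_tenuring_threshold(size_t survivor_capacity, GCTracer &tracer) {
      // Desired survivor occupancy: TargetSurvivorRatio percent of the survivor capacity (in words).
      size_t desired_survivor_size = (size_t)((((double) survivor_capacity) * TargetSurvivorRatio) / 100);
      size_t total = 0;
      uint age = 1;
      assert(sizes[0] == 0, "no objects with age zero should be recorded");
      while (age < table_size) {
        total += sizes[age];
        // Tenure at the first age whose inclusion pushes the accumulated size
        // past the desired survivor size.
        if (total > desired_survivor_size) {
          break;
        }
        age++;
      }
      uint result = age < MaxTenuringThreshold ? age : MaxTenuringThreshold;
      // How the tracer is used is an assumption: presumably the chosen threshold
      // (and perhaps the age table itself) is reported through it, e.g.
      // tracer.report_tenuring_threshold(result);
      return result;
    }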

