
src/share/vm/gc/shared/collectorPolicy.cpp

  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/adaptiveSizePolicy.hpp"
  27 #include "gc/shared/cardTableRS.hpp"
  28 #include "gc/shared/collectorPolicy.hpp"
  29 #include "gc/shared/gcLocker.inline.hpp"
  30 #include "gc/shared/gcPolicyCounters.hpp"
  31 #include "gc/shared/genCollectedHeap.hpp"
  32 #include "gc/shared/generationSpec.hpp"
  33 #include "gc/shared/space.hpp"
  34 #include "gc/shared/vmGCOperations.hpp"
  35 #include "memory/universe.hpp"
  36 #include "runtime/arguments.hpp"
  37 #include "runtime/globals_extension.hpp"
  38 #include "runtime/handles.inline.hpp"
  39 #include "runtime/java.hpp"
  40 #include "runtime/thread.inline.hpp"
  41 #include "runtime/vmThread.hpp"
  42 #include "utilities/macros.hpp"
  43 
  44 // CollectorPolicy methods
  45 
  46 CollectorPolicy::CollectorPolicy() :
  47     _space_alignment(0),
  48     _heap_alignment(0),
  49     _initial_heap_byte_size(InitialHeapSize),
  50     _max_heap_byte_size(MaxHeapSize),
  51     _min_heap_byte_size(Arguments::min_heap_size()),
  52     _max_heap_size_cmdline(false),
  53     _size_policy(NULL),
  54     _should_clear_all_soft_refs(false),


 120     vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified");
 121   }
 122   if (!FLAG_IS_DEFAULT(InitialHeapSize) && InitialHeapSize > MaxHeapSize) {
 123     FLAG_SET_ERGO(size_t, MaxHeapSize, InitialHeapSize);
 124   } else if (!FLAG_IS_DEFAULT(MaxHeapSize) && InitialHeapSize > MaxHeapSize) {
 125     FLAG_SET_ERGO(size_t, InitialHeapSize, MaxHeapSize);
 126     if (InitialHeapSize < _min_heap_byte_size) {
 127       _min_heap_byte_size = InitialHeapSize;
 128     }
 129   }
 130 
 131   _initial_heap_byte_size = InitialHeapSize;
 132   _max_heap_byte_size = MaxHeapSize;
 133 
 134   FLAG_SET_ERGO(size_t, MinHeapDeltaBytes, align_size_up(MinHeapDeltaBytes, _space_alignment));
 135 
 136   DEBUG_ONLY(CollectorPolicy::assert_flags();)
 137 }
 138 
 139 void CollectorPolicy::initialize_size_info() {
 140   if (PrintGCDetails && Verbose) {
 141     gclog_or_tty->print_cr("Minimum heap " SIZE_FORMAT "  Initial heap "
 142       SIZE_FORMAT "  Maximum heap " SIZE_FORMAT,
 143       _min_heap_byte_size, _initial_heap_byte_size, _max_heap_byte_size);
 144   }
 145 
 146   DEBUG_ONLY(CollectorPolicy::assert_size_info();)
 147 }
 148 
 149 bool CollectorPolicy::use_should_clear_all_soft_refs(bool v) {
 150   bool result = _should_clear_all_soft_refs;
 151   set_should_clear_all_soft_refs(false);
 152   return result;
 153 }
 154 
 155 CardTableRS* CollectorPolicy::create_rem_set(MemRegion whole_heap) {
 156   return new CardTableRS(whole_heap);
 157 }
 158 
 159 void CollectorPolicy::cleared_all_soft_refs() {
 160   // If near the gc overhead limit, continue to clear SoftRefs.  SoftRefs may
 161   // have been cleared in the last collection but if the gc overhead
 162   // limit continues to be near, SoftRefs should still be cleared.
 163   if (size_policy() != NULL) {
 164     _should_clear_all_soft_refs = size_policy()->gc_overhead_limit_near();


 471     if (_max_heap_byte_size == _min_heap_byte_size) {
 472       _min_young_size = _max_young_size;
 473     }
 474   } else {
 475     if (FLAG_IS_CMDLINE(NewSize)) {
 476       // If NewSize is set on the command line, we should use it as
 477       // the initial size, but make sure it is within the heap bounds.
 478       _initial_young_size =
 479         MIN2(_max_young_size, bound_minus_alignment(NewSize, _initial_heap_byte_size));
 480       _min_young_size = bound_minus_alignment(_initial_young_size, _min_heap_byte_size);
 481     } else {
 482       // For the case where NewSize is not set on the command line, use
 483       // NewRatio to size the initial young generation. Use the current
 484       // NewSize as the floor, because if NewRatio is overly large, the resulting
 485       // size can be too small.
 486       _initial_young_size =
 487         MIN2(_max_young_size, MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize));
 488     }
 489   }
 490 
 491   if (PrintGCDetails && Verbose) {
 492     gclog_or_tty->print_cr("1: Minimum young " SIZE_FORMAT "  Initial young "
 493       SIZE_FORMAT "  Maximum young " SIZE_FORMAT,
 494       _min_young_size, _initial_young_size, _max_young_size);
 495   }
 496 
 497   // At this point the minimum, initial and maximum sizes
 498   // of the overall heap and of the young generation have been determined.
 499   // The maximum old size can be determined from the maximum young
 500   // and maximum heap size since no explicit flags exist
 501   // for setting the old generation maximum.
 502   _max_old_size = MAX2(_max_heap_byte_size - _max_young_size, _gen_alignment);
 503 
 504   // If no explicit command line flag has been set for the
 505   // old generation size, use what is left.
 506   if (!FLAG_IS_CMDLINE(OldSize)) {
 507     // The user has not specified any value but the ergonomics
 508     // may have chosen a value (which may or may not be consistent
 509     // with the overall heap size).  In either case make
 510     // the minimum, maximum and initial sizes consistent
 511     // with the young sizes and the overall heap sizes.
 512     _min_old_size = _gen_alignment;
 513     _initial_old_size = MIN2(_max_old_size, MAX2(_initial_heap_byte_size - _initial_young_size, _min_old_size));
 514     // _max_old_size has already been made consistent above.
 515   } else {


 541 
 542     size_t desired_young_size = _initial_heap_byte_size - _initial_old_size;
 543     if (_initial_heap_byte_size < _initial_old_size) {
 544       // Old wants all the memory; use the minimum for young and the rest for old
 545       _initial_young_size = _min_young_size;
 546       _initial_old_size = _initial_heap_byte_size - _min_young_size;
 547     } else if (desired_young_size > _max_young_size) {
 548       // Need to increase both young and old generation
 549       _initial_young_size = _max_young_size;
 550       _initial_old_size = _initial_heap_byte_size - _max_young_size;
 551     } else if (desired_young_size < _min_young_size) {
 552       // Need to decrease both young and old generation
 553       _initial_young_size = _min_young_size;
 554       _initial_old_size = _initial_heap_byte_size - _min_young_size;
 555     } else {
 556       // The young generation boundaries allow us to only update the
 557       // young generation.
 558       _initial_young_size = desired_young_size;
 559     }
 560 
 561     if (PrintGCDetails && Verbose) {
 562       gclog_or_tty->print_cr("2: Minimum young " SIZE_FORMAT "  Initial young "
 563         SIZE_FORMAT "  Maximum young " SIZE_FORMAT,
 564         _min_young_size, _initial_young_size, _max_young_size);
 565     }
 566   }
 567 
 568   // Write back to flags if necessary.
 569   if (NewSize != _initial_young_size) {
 570     FLAG_SET_ERGO(size_t, NewSize, _initial_young_size);
 571   }
 572 
 573   if (MaxNewSize != _max_young_size) {
 574     FLAG_SET_ERGO(size_t, MaxNewSize, _max_young_size);
 575   }
 576 
 577   if (OldSize != _initial_old_size) {
 578     FLAG_SET_ERGO(size_t, OldSize, _initial_old_size);
 579   }
 580 
 581   if (PrintGCDetails && Verbose) {
 582     gclog_or_tty->print_cr("Minimum old " SIZE_FORMAT "  Initial old "
 583       SIZE_FORMAT "  Maximum old " SIZE_FORMAT,
 584       _min_old_size, _initial_old_size, _max_old_size);
 585   }
 586 
 587   DEBUG_ONLY(GenCollectorPolicy::assert_size_info();)
 588 }
 589 
 590 HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
 591                                         bool is_tlab,
 592                                         bool* gc_overhead_limit_was_exceeded) {
 593   GenCollectedHeap *gch = GenCollectedHeap::heap();
 594 
 595   debug_only(gch->check_for_valid_allocation_state());
 596   assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");
 597 
 598   // In general gc_overhead_limit_was_exceeded should be false, so
 599   // set it to false here and reset it to true only if the gc time
 600   // limit is being exceeded as checked below.
 601   *gc_overhead_limit_was_exceeded = false;
 602 
 603   HeapWord* result = NULL;
 604 
 605   // Loop until the allocation is satisfied, or unsatisfied after GC.
 606   for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
 607     HandleMark hm; // Discard any handles allocated in each iteration.
 608 
 609     // First allocation attempt is lock-free.
 610     Generation *young = gch->young_gen();
 611     assert(young->supports_inline_contig_alloc(),
 612       "Otherwise, must do alloc within heap lock");
 613     if (young->should_allocate(size, is_tlab)) {
 614       result = young->par_allocate(size, is_tlab);
 615       if (result != NULL) {
 616         assert(gch->is_in_reserved(result), "result not in heap");
 617         return result;
 618       }
 619     }
 620     uint gc_count_before;  // Read inside the Heap_lock locked region.
 621     {
 622       MutexLocker ml(Heap_lock);
 623       if (PrintGC && Verbose) {
 624         gclog_or_tty->print_cr("GenCollectorPolicy::mem_allocate_work:"
 625                                " attempting locked slow path allocation");
 626       }
 627       // Note that only large objects get a shot at being
 628       // allocated in later generations.
 629       bool first_only = ! should_try_older_generation_allocation(size);
 630 
 631       result = gch->attempt_allocation(size, is_tlab, first_only);
 632       if (result != NULL) {
 633         assert(gch->is_in_reserved(result), "result not in heap");
 634         return result;
 635       }
 636 
 637       if (GC_locker::is_active_and_needs_gc()) {
 638         if (is_tlab) {
 639           return NULL;  // Caller will retry allocating individual object.
 640         }
 641         if (!gch->is_maximal_no_gc()) {
 642           // Try and expand heap to satisfy request.
 643           result = expand_heap_and_allocate(size, is_tlab);
 644           // Result could be null if we are out of space.
 645           if (result != NULL) {
 646             return result;


 740   GenCollectedHeap *gch = GenCollectedHeap::heap();
 741   GCCauseSetter x(gch, GCCause::_allocation_failure);
 742   HeapWord* result = NULL;
 743 
 744   assert(size != 0, "Precondition violated");
 745   if (GC_locker::is_active_and_needs_gc()) {
 746     // GC locker is active; instead of a collection we will attempt
 747     // to expand the heap, if there's room for expansion.
 748     if (!gch->is_maximal_no_gc()) {
 749       result = expand_heap_and_allocate(size, is_tlab);
 750     }
 751     return result;   // Could be null if we are out of space.
 752   } else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) {
 753     // Do an incremental collection.
 754     gch->do_collection(false,                     // full
 755                        false,                     // clear_all_soft_refs
 756                        size,                      // size
 757                        is_tlab,                   // is_tlab
 758                        GenCollectedHeap::OldGen); // max_generation
 759   } else {
 760     if (Verbose && PrintGCDetails) {
 761       gclog_or_tty->print(" :: Trying full because partial may fail :: ");
 762     }
 763     // Try a full collection; see delta for bug id 6266275
 764     // for the original code and why this has been simplified
 765     // with from-space allocation criteria modified and
 766     // such allocation moved out of the safepoint path.
 767     gch->do_collection(true,                      // full
 768                        false,                     // clear_all_soft_refs
 769                        size,                      // size
 770                        is_tlab,                   // is_tlab
 771                        GenCollectedHeap::OldGen); // max_generation
 772   }
 773 
 774   result = gch->attempt_allocation(size, is_tlab, false /*first_only*/);
 775 
 776   if (result != NULL) {
 777     assert(gch->is_in_reserved(result), "result not in heap");
 778     return result;
 779   }
 780 
 781   // OK, collection failed, try expansion.
 782   result = expand_heap_and_allocate(size, is_tlab);




  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/adaptiveSizePolicy.hpp"
  27 #include "gc/shared/cardTableRS.hpp"
  28 #include "gc/shared/collectorPolicy.hpp"
  29 #include "gc/shared/gcLocker.inline.hpp"
  30 #include "gc/shared/gcPolicyCounters.hpp"
  31 #include "gc/shared/genCollectedHeap.hpp"
  32 #include "gc/shared/generationSpec.hpp"
  33 #include "gc/shared/space.hpp"
  34 #include "gc/shared/vmGCOperations.hpp"
  35 #include "logging/log.hpp"
  36 #include "memory/universe.hpp"
  37 #include "runtime/arguments.hpp"
  38 #include "runtime/globals_extension.hpp"
  39 #include "runtime/handles.inline.hpp"
  40 #include "runtime/java.hpp"
  41 #include "runtime/thread.inline.hpp"
  42 #include "runtime/vmThread.hpp"
  43 #include "utilities/macros.hpp"
  44 
  45 // CollectorPolicy methods
  46 
  47 CollectorPolicy::CollectorPolicy() :
  48     _space_alignment(0),
  49     _heap_alignment(0),
  50     _initial_heap_byte_size(InitialHeapSize),
  51     _max_heap_byte_size(MaxHeapSize),
  52     _min_heap_byte_size(Arguments::min_heap_size()),
  53     _max_heap_size_cmdline(false),
  54     _size_policy(NULL),
  55     _should_clear_all_soft_refs(false),


 121     vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified");
 122   }
 123   if (!FLAG_IS_DEFAULT(InitialHeapSize) && InitialHeapSize > MaxHeapSize) {
 124     FLAG_SET_ERGO(size_t, MaxHeapSize, InitialHeapSize);
 125   } else if (!FLAG_IS_DEFAULT(MaxHeapSize) && InitialHeapSize > MaxHeapSize) {
 126     FLAG_SET_ERGO(size_t, InitialHeapSize, MaxHeapSize);
 127     if (InitialHeapSize < _min_heap_byte_size) {
 128       _min_heap_byte_size = InitialHeapSize;
 129     }
 130   }
 131 
 132   _initial_heap_byte_size = InitialHeapSize;
 133   _max_heap_byte_size = MaxHeapSize;
 134 
 135   FLAG_SET_ERGO(size_t, MinHeapDeltaBytes, align_size_up(MinHeapDeltaBytes, _space_alignment));
 136 
 137   DEBUG_ONLY(CollectorPolicy::assert_flags();)
 138 }
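
The hunk above reconciles InitialHeapSize and MaxHeapSize when they conflict: a size that was changed from its default takes precedence (the explicit initial size is considered first) and the other value is adjusted so that the initial size never exceeds the maximum. A minimal standalone sketch of that rule, using hypothetical names rather than the HotSpot FLAG_IS_DEFAULT/FLAG_SET_ERGO machinery:

#include <cstddef>

// Illustrative stand-in for the flag state; not HotSpot code.
struct HeapSizeFlags {
  size_t initial;              // InitialHeapSize
  size_t max;                  // MaxHeapSize
  bool   initial_non_default;  // InitialHeapSize changed from its default
  bool   max_non_default;      // MaxHeapSize changed from its default
};

// Mirrors the FLAG_SET_ERGO logic above: keep initial <= max by adjusting
// the size that is allowed to move.
void reconcile_heap_sizes(HeapSizeFlags& f) {
  if (f.initial_non_default && f.initial > f.max) {
    f.max = f.initial;         // an explicit initial size raises the maximum
  } else if (f.max_non_default && f.initial > f.max) {
    f.initial = f.max;         // an explicit maximum clamps the initial size
  }
}
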
 139 
 140 void CollectorPolicy::initialize_size_info() {
 141   log_debug(heap)("Minimum heap " SIZE_FORMAT "  Initial heap " SIZE_FORMAT "  Maximum heap " SIZE_FORMAT,
 142                   _min_heap_byte_size, _initial_heap_byte_size, _max_heap_byte_size);
 143 
 144   DEBUG_ONLY(CollectorPolicy::assert_size_info();)
 145 }
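
The change under review replaces the PrintGCDetails/Verbose-guarded gclog_or_tty calls with Unified Logging, i.e. the log_debug/log_trace macros from logging/log.hpp. A minimal sketch of the pattern follows; it only compiles inside the HotSpot source tree, and the -Xlog selectors are assumptions based on the tag sets used in this file:

#include "logging/log.hpp"  // HotSpot-internal Unified Logging macros

// The macro checks whether the tag set is enabled at the given level, so the
// call site no longer needs an explicit PrintGCDetails/Verbose test.
static void log_heap_bounds(size_t min_bytes, size_t initial_bytes, size_t max_bytes) {
  log_debug(heap)("Minimum heap " SIZE_FORMAT "  Initial heap " SIZE_FORMAT
                  "  Maximum heap " SIZE_FORMAT,
                  min_bytes, initial_bytes, max_bytes);
}

// Output is selected with -Xlog rather than -XX:+PrintGCDetails, for example:
//   -Xlog:heap=debug        enables the log_debug(heap) line above
//   -Xlog:gc+alloc=trace    enables the log_trace(gc, alloc) line later in this file
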
 146 
 147 bool CollectorPolicy::use_should_clear_all_soft_refs(bool v) {
 148   bool result = _should_clear_all_soft_refs;
 149   set_should_clear_all_soft_refs(false);
 150   return result;
 151 }
 152 
 153 CardTableRS* CollectorPolicy::create_rem_set(MemRegion whole_heap) {
 154   return new CardTableRS(whole_heap);
 155 }
 156 
 157 void CollectorPolicy::cleared_all_soft_refs() {
 158   // If near the gc overhead limit, continue to clear SoftRefs.  SoftRefs may
 159   // have been cleared in the last collection but if the gc overhead
 160   // limit continues to be near, SoftRefs should still be cleared.
 161   if (size_policy() != NULL) {
 162     _should_clear_all_soft_refs = size_policy()->gc_overhead_limit_near();


 469     if (_max_heap_byte_size == _min_heap_byte_size) {
 470       _min_young_size = _max_young_size;
 471     }
 472   } else {
 473     if (FLAG_IS_CMDLINE(NewSize)) {
 474       // If NewSize is set on the command line, we should use it as
 475       // the initial size, but make sure it is within the heap bounds.
 476       _initial_young_size =
 477         MIN2(_max_young_size, bound_minus_alignment(NewSize, _initial_heap_byte_size));
 478       _min_young_size = bound_minus_alignment(_initial_young_size, _min_heap_byte_size);
 479     } else {
 480       // For the case where NewSize is not set on the command line, use
 481       // NewRatio to size the initial young generation. Use the current
 482       // NewSize as the floor, because if NewRatio is overly large, the resulting
 483       // size can be too small.
 484       _initial_young_size =
 485         MIN2(_max_young_size, MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize));
 486     }
 487   }
 488 
 489   log_trace(heap)("1: Minimum young " SIZE_FORMAT "  Initial young " SIZE_FORMAT "  Maximum young " SIZE_FORMAT,
 490                   _min_young_size, _initial_young_size, _max_young_size);
 491 
 492   // At this point the minimum, initial and maximum sizes
 493   // of the overall heap and of the young generation have been determined.
 494   // The maximum old size can be determined from the maximum young
 495   // and maximum heap size since no explicit flags exist
 496   // for setting the old generation maximum.
 497   _max_old_size = MAX2(_max_heap_byte_size - _max_young_size, _gen_alignment);
 498 
 499   // If no explicit command line flag has been set for the
 500   // old generation size, use what is left.
 501   if (!FLAG_IS_CMDLINE(OldSize)) {
 502     // The user has not specified any value but the ergonomics
 503     // may have chosen a value (which may or may not be consistent
 504     // with the overall heap size).  In either case make
 505     // the minimum, maximum and initial sizes consistent
 506     // with the young sizes and the overall heap sizes.
 507     _min_old_size = _gen_alignment;
 508     _initial_old_size = MIN2(_max_old_size, MAX2(_initial_heap_byte_size - _initial_young_size, _min_old_size));
 509     // _max_old_size has already been made consistent above.
 510   } else {


 536 
 537     size_t desired_young_size = _initial_heap_byte_size - _initial_old_size;
 538     if (_initial_heap_byte_size < _initial_old_size) {
 539       // Old wants all the memory; use the minimum for young and the rest for old
 540       _initial_young_size = _min_young_size;
 541       _initial_old_size = _initial_heap_byte_size - _min_young_size;
 542     } else if (desired_young_size > _max_young_size) {
 543       // Need to increase both young and old generation
 544       _initial_young_size = _max_young_size;
 545       _initial_old_size = _initial_heap_byte_size - _max_young_size;
 546     } else if (desired_young_size < _min_young_size) {
 547       // Need to decrease both young and old generation
 548       _initial_young_size = _min_young_size;
 549       _initial_old_size = _initial_heap_byte_size - _min_young_size;
 550     } else {
 551       // The young generation boundaries allow us to only update the
 552       // young generation.
 553       _initial_young_size = desired_young_size;
 554     }
 555 
 556     log_trace(heap)("2: Minimum young " SIZE_FORMAT "  Initial young " SIZE_FORMAT "  Maximum young " SIZE_FORMAT,
 557                     _min_young_size, _initial_young_size, _max_young_size);
 558   }
 559 
 560   // Write back to flags if necessary.
 561   if (NewSize != _initial_young_size) {
 562     FLAG_SET_ERGO(size_t, NewSize, _initial_young_size);
 563   }
 564 
 565   if (MaxNewSize != _max_young_size) {
 566     FLAG_SET_ERGO(size_t, MaxNewSize, _max_young_size);
 567   }
 568 
 569   if (OldSize != _initial_old_size) {
 570     FLAG_SET_ERGO(size_t, OldSize, _initial_old_size);
 571   }
 572 
 573   log_trace(heap)("Minimum old " SIZE_FORMAT "  Initial old " SIZE_FORMAT "  Maximum old " SIZE_FORMAT,
 574                   _min_old_size, _initial_old_size, _max_old_size);
 575 
 576   DEBUG_ONLY(GenCollectorPolicy::assert_size_info();)
 577 }
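
For the default path above (NewSize and MaxNewSize not set on the command line), scale_by_NewRatio_aligned() gives the young generation roughly heap_size / (NewRatio + 1), aligned down to the generation alignment, and the old generation takes what is left. A small standalone sketch of that arithmetic; the alignment value and the helper are illustrative, not the HotSpot implementation:

#include <cstddef>
#include <cstdio>

// Illustrative stand-in for align_size_down_bounded().
static size_t align_down(size_t size, size_t alignment) {
  return size - (size % alignment);
}

int main() {
  const size_t M         = 1024 * 1024;
  const size_t heap_size = 3072 * M;  // e.g. -Xmx3g
  const size_t new_ratio = 2;         // default -XX:NewRatio=2 (old:young = 2:1)
  const size_t gen_align = 2 * M;     // assumed generation alignment

  size_t young = align_down(heap_size / (new_ratio + 1), gen_align);
  size_t old   = heap_size - young;

  printf("young = %zu MB, old = %zu MB\n", young / M, old / M);  // 1024 / 2048
  return 0;
}
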
 578 
 579 HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
 580                                         bool is_tlab,
 581                                         bool* gc_overhead_limit_was_exceeded) {
 582   GenCollectedHeap *gch = GenCollectedHeap::heap();
 583 
 584   debug_only(gch->check_for_valid_allocation_state());
 585   assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");
 586 
 587   // In general gc_overhead_limit_was_exceeded should be false, so
 588   // set it to false here and reset it to true only if the gc time
 589   // limit is being exceeded as checked below.
 590   *gc_overhead_limit_was_exceeded = false;
 591 
 592   HeapWord* result = NULL;
 593 
 594   // Loop until the allocation is satisfied, or unsatisfied after GC.
 595   for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
 596     HandleMark hm; // Discard any handles allocated in each iteration.
 597 
 598     // First allocation attempt is lock-free.
 599     Generation *young = gch->young_gen();
 600     assert(young->supports_inline_contig_alloc(),
 601       "Otherwise, must do alloc within heap lock");
 602     if (young->should_allocate(size, is_tlab)) {
 603       result = young->par_allocate(size, is_tlab);
 604       if (result != NULL) {
 605         assert(gch->is_in_reserved(result), "result not in heap");
 606         return result;
 607       }
 608     }
 609     uint gc_count_before;  // Read inside the Heap_lock locked region.
 610     {
 611       MutexLocker ml(Heap_lock);
 612       log_trace(gc, alloc)("GenCollectorPolicy::mem_allocate_work: attempting locked slow path allocation");
 613       // Note that only large objects get a shot at being
 614       // allocated in later generations.
 615       bool first_only = ! should_try_older_generation_allocation(size);
 616 
 617       result = gch->attempt_allocation(size, is_tlab, first_only);
 618       if (result != NULL) {
 619         assert(gch->is_in_reserved(result), "result not in heap");
 620         return result;
 621       }
 622 
 623       if (GC_locker::is_active_and_needs_gc()) {
 624         if (is_tlab) {
 625           return NULL;  // Caller will retry allocating individual object.
 626         }
 627         if (!gch->is_maximal_no_gc()) {
 628           // Try and expand heap to satisfy request.
 629           result = expand_heap_and_allocate(size, is_tlab);
 630           // Result could be null if we are out of space.
 631           if (result != NULL) {
 632             return result;


 726   GenCollectedHeap *gch = GenCollectedHeap::heap();
 727   GCCauseSetter x(gch, GCCause::_allocation_failure);
 728   HeapWord* result = NULL;
 729 
 730   assert(size != 0, "Precondition violated");
 731   if (GC_locker::is_active_and_needs_gc()) {
 732     // GC locker is active; instead of a collection we will attempt
 733     // to expand the heap, if there's room for expansion.
 734     if (!gch->is_maximal_no_gc()) {
 735       result = expand_heap_and_allocate(size, is_tlab);
 736     }
 737     return result;   // Could be null if we are out of space.
 738   } else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) {
 739     // Do an incremental collection.
 740     gch->do_collection(false,                     // full
 741                        false,                     // clear_all_soft_refs
 742                        size,                      // size
 743                        is_tlab,                   // is_tlab
 744                        GenCollectedHeap::OldGen); // max_generation
 745   } else {
 746     log_trace(gc)(" :: Trying full because partial may fail :: ");
 747     // Try a full collection; see delta for bug id 6266275
 748     // for the original code and why this has been simplified
 749     // with from-space allocation criteria modified and
 750     // such allocation moved out of the safepoint path.
 751     gch->do_collection(true,                      // full
 752                        false,                     // clear_all_soft_refs
 753                        size,                      // size
 754                        is_tlab,                   // is_tlab
 755                        GenCollectedHeap::OldGen); // max_generation
 756   }
 757 
 758   result = gch->attempt_allocation(size, is_tlab, false /*first_only*/);
 759 
 760   if (result != NULL) {
 761     assert(gch->is_in_reserved(result), "result not in heap");
 762     return result;
 763   }
 764 
 765   // OK, collection failed, try expansion.
 766   result = expand_heap_and_allocate(size, is_tlab);

