174 // byte entry and the os page size is 4096, the maximum heap size should
175 // be 512*4096 = 2MB aligned.
176
177 size_t alignment = GenRemSet::max_alignment_constraint();
178
179 if (UseLargePages) {
180 // In presence of large pages we have to make sure that our
181 // alignment is large page aware.
182 alignment = lcm(os::large_page_size(), alignment);
183 }
184
185 return alignment;
186 }
187
188 // GenCollectorPolicy methods
189
// Default constructor: zero all generation sizing fields and the generation
// alignment, and leave the generation spec array NULL. Real values are
// installed later by the initialize_* methods (e.g. the alignments in
// MarkSweepPolicy::initialize_alignments and the specs in
// MarkSweepPolicy::initialize_generations).
190 GenCollectorPolicy::GenCollectorPolicy() :
191 _min_young_size(0),
192 _initial_young_size(0),
193 _max_young_size(0),
194 _gen_alignment(0),
195 _min_old_size(0),
196 _initial_old_size(0),
197 _max_old_size(0),
198 _generations(NULL)
199 {}
200
201 size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
202 return align_size_down_bounded(base_size / (NewRatio + 1), _gen_alignment);
203 }
204
205 size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size,
206 size_t maximum_size) {
207 size_t max_minus = maximum_size - _gen_alignment;
208 return desired_size < max_minus ? desired_size : max_minus;
209 }
210
211
212 void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size,
213 size_t init_promo_size,
214 size_t init_survivor_size) {
215 const double max_gc_pause_sec = ((double) MaxGCPauseMillis) / 1000.0;
216 _size_policy = new AdaptiveSizePolicy(init_eden_size,
217 init_promo_size,
218 init_survivor_size,
585 HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
586 bool is_tlab,
587 bool* gc_overhead_limit_was_exceeded) {
588 GenCollectedHeap *gch = GenCollectedHeap::heap();
589
590 debug_only(gch->check_for_valid_allocation_state());
591 assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");
592
593 // In general gc_overhead_limit_was_exceeded should be false so
594 // set it so here and reset it to true only if the gc time
595 // limit is being exceeded as checked below.
596 *gc_overhead_limit_was_exceeded = false;
597
598 HeapWord* result = NULL;
599
600 // Loop until the allocation is satisfied, or unsatisfied after GC.
601 for (int try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
602 HandleMark hm; // Discard any handles allocated in each iteration.
603
604 // First allocation attempt is lock-free.
605 Generation *young = gch->get_gen(0);
606 assert(young->supports_inline_contig_alloc(),
607 "Otherwise, must do alloc within heap lock");
608 if (young->should_allocate(size, is_tlab)) {
609 result = young->par_allocate(size, is_tlab);
610 if (result != NULL) {
611 assert(gch->is_in_reserved(result), "result not in heap");
612 return result;
613 }
614 }
615 unsigned int gc_count_before; // Read inside the Heap_lock locked region.
616 {
617 MutexLocker ml(Heap_lock);
618 if (PrintGC && Verbose) {
619 gclog_or_tty->print_cr("TwoGenerationCollectorPolicy::mem_allocate_work:"
620 " attempting locked slow path allocation");
621 }
622 // Note that only large objects get a shot at being
623 // allocated in later generations.
624 bool first_only = ! should_try_older_generation_allocation(size);
625
626 result = gch->attempt_allocation(size, is_tlab, first_only);
627 if (result != NULL) {
628 assert(gch->is_in_reserved(result), "result not in heap");
629 return result;
630 }
631
632 if (GC_locker::is_active_and_needs_gc()) {
633 if (is_tlab) {
634 return NULL; // Caller will retry allocating individual object.
635 }
636 if (!gch->is_maximal_no_gc()) {
637 // Try and expand heap to satisfy request.
638 result = expand_heap_and_allocate(size, is_tlab);
639 // Result could be null if we are out of space.
689
690 const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
691 const bool softrefs_clear = all_soft_refs_clear();
692
693 if (limit_exceeded && softrefs_clear) {
694 *gc_overhead_limit_was_exceeded = true;
695 size_policy()->set_gc_overhead_limit_exceeded(false);
696 if (op.result() != NULL) {
697 CollectedHeap::fill_with_object(op.result(), size);
698 }
699 return NULL;
700 }
701 assert(result == NULL || gch->is_in_reserved(result),
702 "result not in heap");
703 return result;
704 }
705
706 // Give a warning if we seem to be looping forever.
707 if ((QueuedAllocationWarningCount > 0) &&
708 (try_count % QueuedAllocationWarningCount == 0)) {
709 warning("TwoGenerationCollectorPolicy::mem_allocate_work retries %d times \n\t"
710 " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
711 }
712 }
713 }
714
715 HeapWord* GenCollectorPolicy::expand_heap_and_allocate(size_t size,
716 bool is_tlab) {
717 GenCollectedHeap *gch = GenCollectedHeap::heap();
718 HeapWord* result = NULL;
719 for (int i = number_of_generations() - 1; i >= 0 && result == NULL; i--) {
720 Generation *gen = gch->get_gen(i);
721 if (gen->should_allocate(size, is_tlab)) {
722 result = gen->expand_and_allocate(size, is_tlab);
723 }
724 }
725 assert(result == NULL || gch->is_in_reserved(result), "result not in heap");
726 return result;
727 }
728
// Last-ditch allocation path taken after a normal allocation attempt has
// failed. Escalates through: (1) plain heap expansion when a JNI critical
// section is holding off GC, (2) an incremental collection when it is not
// expected to fail, otherwise a full collection, (3) a post-GC allocation
// attempt and another expansion attempt, and finally (4) a maximally
// compacting full collection that also clears all soft references.
// Returns NULL only when every strategy has failed; per the comment below,
// the caller is then expected to raise an OOM condition.
729 HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
730 bool is_tlab) {
731 GenCollectedHeap *gch = GenCollectedHeap::heap();
732 GCCauseSetter x(gch, GCCause::_allocation_failure);
733 HeapWord* result = NULL;
734 
735 assert(size != 0, "Precondition violated");
736 if (GC_locker::is_active_and_needs_gc()) {
737 // GC locker is active; instead of a collection we will attempt
738 // to expand the heap, if there's room for expansion.
739 if (!gch->is_maximal_no_gc()) {
740 result = expand_heap_and_allocate(size, is_tlab);
741 }
742 return result; // Could be null if we are out of space.
743 } else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) {
744 // Do an incremental collection.
745 gch->do_collection(false /* full */,
746 false /* clear_all_soft_refs */,
747 size /* size */,
748 is_tlab /* is_tlab */,
749 number_of_generations() - 1 /* max_level */);
750 } else {
751 if (Verbose && PrintGCDetails) {
752 gclog_or_tty->print(" :: Trying full because partial may fail :: ");
753 }
754 // Try a full collection; see delta for bug id 6266275
755 // for the original code and why this has been simplified
756 // with from-space allocation criteria modified and
757 // such allocation moved out of the safepoint path.
758 gch->do_collection(true /* full */,
759 false /* clear_all_soft_refs */,
760 size /* size */,
761 is_tlab /* is_tlab */,
762 number_of_generations() - 1 /* max_level */);
763 }
764 
// Retry the allocation now that the collection has (hopefully) freed space.
765 result = gch->attempt_allocation(size, is_tlab, false /*first_only*/);
766 
767 if (result != NULL) {
768 assert(gch->is_in_reserved(result), "result not in heap");
769 return result;
770 }
771 
772 // OK, collection failed, try expansion.
773 result = expand_heap_and_allocate(size, is_tlab);
774 if (result != NULL) {
775 return result;
776 }
777 
778 // If we reach this point, we're really out of memory. Try every trick
779 // we can to reclaim memory. Force collection of soft references. Force
780 // a complete compaction of the heap. Any additional methods for finding
781 // free memory should be here, especially if they are expensive. If this
782 // attempt fails, an OOM exception will be thrown.
783 {
784 UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted
785 
786 gch->do_collection(true /* full */,
787 true /* clear_all_soft_refs */,
788 size /* size */,
789 is_tlab /* is_tlab */,
790 number_of_generations() - 1 /* max_level */);
791 }
792 
// Final attempt after the maximally compacting, soft-ref-clearing collection.
793 result = gch->attempt_allocation(size, is_tlab, false /* first_only */);
794 if (result != NULL) {
795 assert(gch->is_in_reserved(result), "result not in heap");
796 return result;
797 }
798 
799 assert(!should_clear_all_soft_refs(),
800 "Flag should have been handled and cleared prior to this point");
801 
802 // What else? We might try synchronous finalization later. If the total
803 // space available is large enough for the allocation, then a more
804 // complete compaction phase than we've tried so far might be
805 // appropriate.
806 return NULL;
807 }
808
809 MetaWord* CollectorPolicy::satisfy_failed_metadata_allocation(
810 ClassLoaderData* loader_data,
875 return op.result();
876 }
877 loop_count++;
878 if ((QueuedAllocationWarningCount > 0) &&
879 (loop_count % QueuedAllocationWarningCount == 0)) {
880 warning("satisfy_failed_metadata_allocation() retries %d times \n\t"
881 " size=" SIZE_FORMAT, loop_count, word_size);
882 }
883 } while (true); // Until a GC is done
884 }
885
886 // Return true if any of the following is true:
887 // . the allocation won't fit into the current young gen heap
888 // . gc locker is occupied (jni critical section)
889 // . heap memory is tight -- the most recent previous collection
890 // was a full collection because a partial collection (would
891 // have) failed and is likely to fail again
892 bool GenCollectorPolicy::should_try_older_generation_allocation(
893 size_t word_size) const {
894 GenCollectedHeap* gch = GenCollectedHeap::heap();
895 size_t young_capacity = gch->get_gen(0)->capacity_before_gc();
896 return (word_size > heap_word_size(young_capacity))
897 || GC_locker::is_active_and_needs_gc()
898 || gch->incremental_collection_failed();
899 }
900
901
902 //
903 // MarkSweepPolicy methods
904 //
905
906 void MarkSweepPolicy::initialize_alignments() {
907 _space_alignment = _gen_alignment = (uintx)Generation::GenGrain;
908 _heap_alignment = compute_heap_alignment();
909 }
910
911 void MarkSweepPolicy::initialize_generations() {
912 _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, CURRENT_PC,
913 AllocFailStrategy::RETURN_NULL);
914 if (_generations == NULL) {
915 vm_exit_during_initialization("Unable to allocate gen spec");
916 }
917
918 if (UseParNewGC) {
919 _generations[0] = new GenerationSpec(Generation::ParNew, _initial_young_size, _max_young_size);
920 } else {
921 _generations[0] = new GenerationSpec(Generation::DefNew, _initial_young_size, _max_young_size);
922 }
923 _generations[1] = new GenerationSpec(Generation::MarkSweepCompact, _initial_old_size, _max_old_size);
924
925 if (_generations[0] == NULL || _generations[1] == NULL) {
926 vm_exit_during_initialization("Unable to allocate gen spec");
927 }
928 }
929
930 void MarkSweepPolicy::initialize_gc_policy_counters() {
931 // Initialize the policy counters - 2 collectors, 3 generations.
932 if (UseParNewGC) {
933 _gc_policy_counters = new GCPolicyCounters("ParNew:MSC", 2, 3);
934 } else {
935 _gc_policy_counters = new GCPolicyCounters("Copy:MSC", 2, 3);
936 }
937 }
938
939 /////////////// Unit tests ///////////////
940
941 #ifndef PRODUCT
942 // Testing that the NewSize flag is handled correct is hard because it
943 // depends on so many other configurable variables. This test only tries to
944 // verify that there are some basic rules for NewSize honored by the policies.
945 class TestGenCollectorPolicy {
|
174 // byte entry and the os page size is 4096, the maximum heap size should
175 // be 512*4096 = 2MB aligned.
176
177 size_t alignment = GenRemSet::max_alignment_constraint();
178
179 if (UseLargePages) {
180 // In presence of large pages we have to make sure that our
181 // alignment is large page aware.
182 alignment = lcm(os::large_page_size(), alignment);
183 }
184
185 return alignment;
186 }
187
188 // GenCollectorPolicy methods
189
// Default constructor: zero all generation sizing fields and the generation
// alignment, and leave both generation specs NULL. Real values are
// installed later by the initialize_* methods (e.g. the alignments in
// MarkSweepPolicy::initialize_alignments and the specs in
// MarkSweepPolicy::initialize_generations).
190 GenCollectorPolicy::GenCollectorPolicy() :
191 _min_young_size(0),
192 _initial_young_size(0),
193 _max_young_size(0),
194 _min_old_size(0),
195 _initial_old_size(0),
196 _max_old_size(0),
197 _gen_alignment(0),
198 _young_gen_spec(NULL),
199 _old_gen_spec(NULL)
200 {}
201
202 size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
203 return align_size_down_bounded(base_size / (NewRatio + 1), _gen_alignment);
204 }
205
206 size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size,
207 size_t maximum_size) {
208 size_t max_minus = maximum_size - _gen_alignment;
209 return desired_size < max_minus ? desired_size : max_minus;
210 }
211
212
213 void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size,
214 size_t init_promo_size,
215 size_t init_survivor_size) {
216 const double max_gc_pause_sec = ((double) MaxGCPauseMillis) / 1000.0;
217 _size_policy = new AdaptiveSizePolicy(init_eden_size,
218 init_promo_size,
219 init_survivor_size,
586 HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
587 bool is_tlab,
588 bool* gc_overhead_limit_was_exceeded) {
589 GenCollectedHeap *gch = GenCollectedHeap::heap();
590
591 debug_only(gch->check_for_valid_allocation_state());
592 assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");
593
594 // In general gc_overhead_limit_was_exceeded should be false so
595 // set it so here and reset it to true only if the gc time
596 // limit is being exceeded as checked below.
597 *gc_overhead_limit_was_exceeded = false;
598
599 HeapWord* result = NULL;
600
601 // Loop until the allocation is satisfied, or unsatisfied after GC.
602 for (int try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
603 HandleMark hm; // Discard any handles allocated in each iteration.
604
605 // First allocation attempt is lock-free.
606 Generation *young = gch->young_gen();
607 assert(young->supports_inline_contig_alloc(),
608 "Otherwise, must do alloc within heap lock");
609 if (young->should_allocate(size, is_tlab)) {
610 result = young->par_allocate(size, is_tlab);
611 if (result != NULL) {
612 assert(gch->is_in_reserved(result), "result not in heap");
613 return result;
614 }
615 }
616 unsigned int gc_count_before; // Read inside the Heap_lock locked region.
617 {
618 MutexLocker ml(Heap_lock);
619 if (PrintGC && Verbose) {
620 gclog_or_tty->print_cr("GenCollectorPolicy::mem_allocate_work:"
621 " attempting locked slow path allocation");
622 }
623 // Note that only large objects get a shot at being
624 // allocated in later generations.
625 bool first_only = ! should_try_older_generation_allocation(size);
626
627 result = gch->attempt_allocation(size, is_tlab, first_only);
628 if (result != NULL) {
629 assert(gch->is_in_reserved(result), "result not in heap");
630 return result;
631 }
632
633 if (GC_locker::is_active_and_needs_gc()) {
634 if (is_tlab) {
635 return NULL; // Caller will retry allocating individual object.
636 }
637 if (!gch->is_maximal_no_gc()) {
638 // Try and expand heap to satisfy request.
639 result = expand_heap_and_allocate(size, is_tlab);
640 // Result could be null if we are out of space.
690
691 const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
692 const bool softrefs_clear = all_soft_refs_clear();
693
694 if (limit_exceeded && softrefs_clear) {
695 *gc_overhead_limit_was_exceeded = true;
696 size_policy()->set_gc_overhead_limit_exceeded(false);
697 if (op.result() != NULL) {
698 CollectedHeap::fill_with_object(op.result(), size);
699 }
700 return NULL;
701 }
702 assert(result == NULL || gch->is_in_reserved(result),
703 "result not in heap");
704 return result;
705 }
706
707 // Give a warning if we seem to be looping forever.
708 if ((QueuedAllocationWarningCount > 0) &&
709 (try_count % QueuedAllocationWarningCount == 0)) {
710 warning("GenCollectorPolicy::mem_allocate_work retries %d times \n\t"
711 " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
712 }
713 }
714 }
715
716 HeapWord* GenCollectorPolicy::expand_heap_and_allocate(size_t size,
717 bool is_tlab) {
718 GenCollectedHeap *gch = GenCollectedHeap::heap();
719 HeapWord* result = NULL;
720 Generation *old = gch->old_gen();
721 if (old->should_allocate(size, is_tlab)) {
722 result = old->expand_and_allocate(size, is_tlab);
723 }
724 if (result == NULL) {
725 Generation *young = gch->young_gen();
726 if (young->should_allocate(size, is_tlab)) {
727 result = young->expand_and_allocate(size, is_tlab);
728 }
729 }
730 assert(result == NULL || gch->is_in_reserved(result), "result not in heap");
731 return result;
732 }
733
// Last-ditch allocation path taken after a normal allocation attempt has
// failed. Escalates through: (1) plain heap expansion when a JNI critical
// section is holding off GC, (2) an incremental collection when it is not
// expected to fail, otherwise a full collection, (3) a post-GC allocation
// attempt and another expansion attempt, and finally (4) a maximally
// compacting full collection that also clears all soft references.
// Returns NULL only when every strategy has failed; per the comment below,
// the caller is then expected to raise an OOM condition.
734 HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
735 bool is_tlab) {
736 GenCollectedHeap *gch = GenCollectedHeap::heap();
737 GCCauseSetter x(gch, GCCause::_allocation_failure);
738 HeapWord* result = NULL;
739 
740 assert(size != 0, "Precondition violated");
741 if (GC_locker::is_active_and_needs_gc()) {
742 // GC locker is active; instead of a collection we will attempt
743 // to expand the heap, if there's room for expansion.
744 if (!gch->is_maximal_no_gc()) {
745 result = expand_heap_and_allocate(size, is_tlab);
746 }
747 return result; // Could be null if we are out of space.
748 } else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) {
749 // Do an incremental collection.
750 gch->do_collection(false /* full */,
751 false /* clear_all_soft_refs */,
752 size /* size */,
753 is_tlab /* is_tlab */,
754 Generation::Old /* max_gen */);
755 } else {
756 if (Verbose && PrintGCDetails) {
757 gclog_or_tty->print(" :: Trying full because partial may fail :: ");
758 }
759 // Try a full collection; see delta for bug id 6266275
760 // for the original code and why this has been simplified
761 // with from-space allocation criteria modified and
762 // such allocation moved out of the safepoint path.
763 gch->do_collection(true /* full */,
764 false /* clear_all_soft_refs */,
765 size /* size */,
766 is_tlab /* is_tlab */,
767 Generation::Old /* max_gen */);
768 }
769 
// Retry the allocation now that the collection has (hopefully) freed space.
770 result = gch->attempt_allocation(size, is_tlab, false /*first_only*/);
771 
772 if (result != NULL) {
773 assert(gch->is_in_reserved(result), "result not in heap");
774 return result;
775 }
776 
777 // OK, collection failed, try expansion.
778 result = expand_heap_and_allocate(size, is_tlab);
779 if (result != NULL) {
780 return result;
781 }
782 
783 // If we reach this point, we're really out of memory. Try every trick
784 // we can to reclaim memory. Force collection of soft references. Force
785 // a complete compaction of the heap. Any additional methods for finding
786 // free memory should be here, especially if they are expensive. If this
787 // attempt fails, an OOM exception will be thrown.
788 {
789 UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted
790 
791 gch->do_collection(true /* full */,
792 true /* clear_all_soft_refs */,
793 size /* size */,
794 is_tlab /* is_tlab */,
795 Generation::Old /* max_gen */);
796 }
797 
// Final attempt after the maximally compacting, soft-ref-clearing collection.
798 result = gch->attempt_allocation(size, is_tlab, false /* first_only */);
799 if (result != NULL) {
800 assert(gch->is_in_reserved(result), "result not in heap");
801 return result;
802 }
803 
804 assert(!should_clear_all_soft_refs(),
805 "Flag should have been handled and cleared prior to this point");
806 
807 // What else? We might try synchronous finalization later. If the total
808 // space available is large enough for the allocation, then a more
809 // complete compaction phase than we've tried so far might be
810 // appropriate.
811 return NULL;
812 }
813
814 MetaWord* CollectorPolicy::satisfy_failed_metadata_allocation(
815 ClassLoaderData* loader_data,
880 return op.result();
881 }
882 loop_count++;
883 if ((QueuedAllocationWarningCount > 0) &&
884 (loop_count % QueuedAllocationWarningCount == 0)) {
885 warning("satisfy_failed_metadata_allocation() retries %d times \n\t"
886 " size=" SIZE_FORMAT, loop_count, word_size);
887 }
888 } while (true); // Until a GC is done
889 }
890
891 // Return true if any of the following is true:
892 // . the allocation won't fit into the current young gen heap
893 // . gc locker is occupied (jni critical section)
894 // . heap memory is tight -- the most recent previous collection
895 // was a full collection because a partial collection (would
896 // have) failed and is likely to fail again
897 bool GenCollectorPolicy::should_try_older_generation_allocation(
898 size_t word_size) const {
899 GenCollectedHeap* gch = GenCollectedHeap::heap();
900 size_t young_capacity = gch->young_gen()->capacity_before_gc();
901 return (word_size > heap_word_size(young_capacity))
902 || GC_locker::is_active_and_needs_gc()
903 || gch->incremental_collection_failed();
904 }
905
906
907 //
908 // MarkSweepPolicy methods
909 //
910
911 void MarkSweepPolicy::initialize_alignments() {
912 _space_alignment = _gen_alignment = (uintx)Generation::GenGrain;
913 _heap_alignment = compute_heap_alignment();
914 }
915
916 void MarkSweepPolicy::initialize_generations() {
917 if (UseParNewGC) {
918 _young_gen_spec = new GenerationSpec(Generation::ParNew, _initial_young_size, _max_young_size, _gen_alignment);
919 } else {
920 _young_gen_spec = new GenerationSpec(Generation::DefNew, _initial_young_size, _max_young_size, _gen_alignment);
921 }
922 _old_gen_spec = new GenerationSpec(Generation::MarkSweepCompact, _initial_old_size, _max_old_size, _gen_alignment);
923
924 if (_young_gen_spec == NULL || _old_gen_spec == NULL) {
925 vm_exit_during_initialization("Unable to allocate gen spec");
926 }
927 }
928
929 void MarkSweepPolicy::initialize_gc_policy_counters() {
930 // Initialize the policy counters - 2 collectors, 3 generations.
931 if (UseParNewGC) {
932 _gc_policy_counters = new GCPolicyCounters("ParNew:MSC", 2, 3);
933 } else {
934 _gc_policy_counters = new GCPolicyCounters("Copy:MSC", 2, 3);
935 }
936 }
937
938 /////////////// Unit tests ///////////////
939
940 #ifndef PRODUCT
941 // Testing that the NewSize flag is handled correct is hard because it
942 // depends on so many other configurable variables. This test only tries to
943 // verify that there are some basic rules for NewSize honored by the policies.
944 class TestGenCollectorPolicy {
|