  _max_survivor_size = compute_survivor_size(size, alignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters
  GenCollectorPolicy* gcp = (GenCollectorPolicy*) GenCollectedHeap::heap()->collector_policy();

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3,
      gcp->min_young_size(), gcp->max_young_size(), &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _old_gen = NULL;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;

  _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
}

void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
  uintx alignment =
    GenCollectedHeap::heap()->collector_policy()->space_alignment();

  // If the spaces are being cleared (only done at heap initialization
  // currently), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces
  // so check.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
         "Initialization of the survivor spaces assumes these are empty");

  // Compute sizes

  // ... [elided] ...
  }

  return success;
}

void DefNewGeneration::compute_new_size() {
  // This is called after a gc that includes the old generation, so
  // from-space will normally be empty.
  // Note that we check both spaces, since if scavenge failed they revert roles.
  // If not we bail out (otherwise we would have to relocate the objects).
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  size_t old_size = gch->old_gen()->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = spec()->init_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  // Compute the desired new generation size based on NewRatio and
  // NewSizeThreadIncrease.
  size_t desired_new_size = old_size/NewRatio;
  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
  desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);
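  // For example, with the default NewRatio of 2 and a 512M old generation,
  // the base desired size is 256M; each non-daemon thread then adds
  // NewSizeThreadIncrease bytes, and the sum is rounded up to GenGrain.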

  // Adjust new generation size
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  // ... [elided] ...
}

void DefNewGeneration::adjust_desired_tenuring_threshold() {
  // Set the desired survivor size to half the real survivor space
  // (compute_tenuring_threshold() applies TargetSurvivorRatio, which
  // defaults to 50%).
  _tenuring_threshold =
    age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
}

void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start();
  DefNewTracer gc_tracer;
  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());

  _old_gen = gch->old_gen();

  // If the old generation is too full to accommodate promotion
  // from this generation, pass on collection; let the old generation
  // do it.
  if (!collection_attempt_is_safe()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: Collection attempt not safe :: ");
    }
    gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, gc_tracer.gc_id());
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  gch->trace_heap_before_gc(&gc_tracer);

  // ... [elided] ...

    size_policy->reset_gc_overhead_limit_count();
    assert(!gch->incremental_collection_failed(), "Should be clear");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed) ");
    }
    // Add to-space to the list of spaces to compact
    // when a promotion failure has occurred.  In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_failed();

    // Inform the old generation that a promotion failure occurred.
    _old_gen->promotion_failure_occurred();
    gc_tracer.report_promotion_failed(_promotion_failed_info);

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  if (PrintGC && !PrintGCDetails) {
    gch->print_heap_change(gch_prev_used);
  }
  // Set a new iteration safe limit for the survivor spaces.
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());
  SpecializationStats::print();

  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings and os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  gch->trace_heap_after_gc(&gc_tracer);

  // ... [elided] ...

    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}
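
// Copy a live object into to-space, or promote it to the old generation
// if it has reached the tenuring threshold (or if to-space allocation
// fails). If promotion also fails, the object is forwarded to itself
// and returned in place.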
oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate_aligned(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _old_gen->promote(old, s);
    if (obj == NULL) {
      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  // ... [elided] ...

    list = sb;
  }
}

void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of
  // to_space if ZapUnusedHeapArea.  This is needed because
  // top is not maintained while using to-space as scratch.
  if (ZapUnusedHeapArea) {
    to()->mangle_unused_area_complete();
  }
}
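
// A scavenge attempt is safe only if to-space is empty (survivors need
// somewhere to go) and the old generation judges that it can absorb a
// worst-case promotion of everything currently used in this generation.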
bool DefNewGeneration::collection_attempt_is_safe() {
  if (!to()->is_empty()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: to is not empty :: ");
    }
    return false;
  }
  if (_old_gen == NULL) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    _old_gen = gch->old_gen();
  }
  return _old_gen->promotion_attempt_is_safe(used());
}

void DefNewGeneration::gc_epilogue(bool full) {
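  // Debug-only state: remembers whether the incremental-collection-failed
  // flag was already seen set, so subsequent epilogues can sanity-check
  // the flag's transitions between collections.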
  DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)

  assert(!GC_locker::is_active(), "We should not be executing here");
  // Check if the heap is approaching full after a collection has
  // been done.  Generally the young generation is empty at
  // a minimum at the end of a collection.  If it is not, then
  // the heap is approaching full.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (full) {
    DEBUG_ONLY(seen_incremental_collection_failed = false;)
    if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      }
      gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
      set_should_allocate_from_space(); // we seem to be running out of space

  // ... [elided] ...

void DefNewGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print(" eden");
  eden()->print_on(st);
  st->print(" from");
  from()->print_on(st);
  st->print(" to ");
  to()->print_on(st);
}


const char* DefNewGeneration::name() const {
  return "def new generation";
}

// Moved from inline file as they are not called inline
CompactibleSpace* DefNewGeneration::first_compaction_space() const {
  return eden();
}

HeapWord* DefNewGeneration::allocate(size_t word_size, bool is_tlab) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden.  If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result != NULL) {
    if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
      _old_gen->sample_eden_chunk();
    }
    return result;
  }
  do {
    HeapWord* old_limit = eden()->soft_end();
    if (old_limit < eden()->end()) {
      // Tell the old generation we reached a limit.
      HeapWord* new_limit =
        _old_gen->allocation_limit_reached(eden(), eden()->top(), word_size);
      if (new_limit != NULL) {
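        // Publish the new soft limit with a CAS. The result is ignored on
        // purpose: if another thread raced us and installed a limit first,
        // the retry of par_allocate() below sees the updated value anyway.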
        Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
      } else {
        assert(eden()->soft_end() == eden()->end(),
               "invalid state after allocation_limit_reached returned null");
      }
    } else {
      // The allocation failed and the soft limit is equal to the hard limit;
      // there is no point in attempting to allocate again.
      assert(old_limit == eden()->end(), "sanity check");
      break;
    }
    // Retry the allocation until it succeeds or the soft limit cannot be
    // adjusted any further.
    result = eden()->par_allocate(word_size);
  } while (result == NULL);

  // If the eden is full and the last collection bailed out, we are running
  // out of heap space, and we try to allocate from from-space, too.
  // allocate_from_space can't be inlined because that would introduce a
  // circular dependency at compile time.
  if (result == NULL) {
    result = allocate_from_space(word_size);
  } else if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
    _old_gen->sample_eden_chunk();
  }
  return result;
}

HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                         bool is_tlab) {
  HeapWord* res = eden()->par_allocate(word_size);
  if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
    _old_gen->sample_eden_chunk();
  }
  return res;
}
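
// Note: this discards any soft limit the old generation may have
// installed via allocation_limit_reached() during the previous cycle.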
void DefNewGeneration::gc_prologue(bool full) {
  // Ensure that _end and _soft_end are the same in eden space.
  eden()->set_soft_end(eden()->end());
}
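
// TLABs are carved out of eden only, so the TLAB statistics simply
// delegate to the eden space.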
size_t DefNewGeneration::tlab_capacity() const {
  return eden()->capacity();
}

size_t DefNewGeneration::tlab_used() const {
  return eden()->used();
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
  return unsafe_max_alloc_nogc();
}