776 size_t av_promo = (size_t)gc_stats()->avg_promoted()->padded_average();
777 bool res = (available >= av_promo) || (available >= max_promotion_in_bytes);
778 if (Verbose && PrintGCDetails) {
779 gclog_or_tty->print_cr(
780 "CMS: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
781 "max_promo("SIZE_FORMAT")",
782 res? "":" not", available, res? ">=":"<",
783 av_promo, max_promotion_in_bytes);
784 }
785 return res;
786 }
787
// At a promotion failure dump information on block layout in heap
// (cms old generation).
void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
  // Diagnostic hook: when -XX:+CMSDumpAtPromotionFailure is set, dump the
  // CMS space's block layout (with the appropriate locks held) to the GC log.
  if (CMSDumpAtPromotionFailure) {
    cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
  }
}
795
796 CompactibleSpace*
797 ConcurrentMarkSweepGeneration::first_compaction_space() const {
798 return _cmsSpace;
799 }
800
801 void ConcurrentMarkSweepGeneration::reset_after_compaction() {
802 // Clear the promotion information. These pointers can be adjusted
803 // along with all the other pointers into the heap but
804 // compaction is expected to be a rare event with
805 // a heap using cms so don't do it without seeing the need.
806 for (uint i = 0; i < ParallelGCThreads; i++) {
807 _par_gc_thread_states[i]->promo.reset();
808 }
809 }
810
// Apply "blk" to this generation's single space. "usedOnly" is accepted to
// satisfy the Generation interface but is not consulted here.
void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {
  blk->do_space(_cmsSpace);
}
814
815 void ConcurrentMarkSweepGeneration::compute_new_size() {
816 assert_locked_or_safepoint(Heap_lock);
817
818 // If incremental collection failed, we just want to expand
819 // to the limit.
820 if (incremental_collection_failed()) {
821 clear_incremental_collection_failed();
822 grow_to_reserved();
823 return;
824 }
825
826 // The heap has been compacted but not reset yet.
827 // Any metric such as free() or used() will be incorrect.
828
829 CardGeneration::compute_new_size();
830
831 // Reset again after a possible resizing
832 if (did_compact()) {
833 cmsSpace()->reset_after_compaction();
834 }
865 gclog_or_tty->print_cr(" Capacity "SIZE_FORMAT, capacity()/1000);
866 gclog_or_tty->print_cr(" Desired capacity "SIZE_FORMAT,
867 desired_capacity/1000);
868 int prev_level = level() - 1;
869 if (prev_level >= 0) {
870 size_t prev_size = 0;
871 GenCollectedHeap* gch = GenCollectedHeap::heap();
872 Generation* prev_gen = gch->_gens[prev_level];
873 prev_size = prev_gen->capacity();
874 gclog_or_tty->print_cr(" Younger gen size "SIZE_FORMAT,
875 prev_size/1000);
876 }
877 gclog_or_tty->print_cr(" unsafe_max_alloc_nogc "SIZE_FORMAT,
878 unsafe_max_alloc_nogc()/1000);
879 gclog_or_tty->print_cr(" contiguous available "SIZE_FORMAT,
880 contiguous_available()/1000);
881 gclog_or_tty->print_cr(" Expand by "SIZE_FORMAT" (bytes)",
882 expand_bytes);
883 }
884 // safe if expansion fails
885 expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
886 if (PrintGCDetails && Verbose) {
887 gclog_or_tty->print_cr(" Expanded free fraction %f",
888 ((double) free()) / capacity());
889 }
890 } else {
891 size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
892 assert(desired_capacity <= capacity(), "invalid expansion size");
893 size_t shrink_bytes = capacity() - desired_capacity;
894 // Don't shrink unless the delta is greater than the minimum shrink we want
895 if (shrink_bytes >= MinHeapDeltaBytes) {
896 shrink_free_list_by(shrink_bytes);
897 }
898 }
899 }
900
// Expose the underlying free list space's lock; callers take it to
// synchronize allocation and iteration over the CMS space.
Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
  return cmsSpace()->freelistLock();
}
904
905 HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
1031 }
1032 }
1033 }
1034
1035 oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
1036 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1037 // allocate, copy and if necessary update promoinfo --
1038 // delegate to underlying space.
1039 assert_lock_strong(freelistLock());
1040
1041 #ifndef PRODUCT
1042 if (Universe::heap()->promotion_should_fail()) {
1043 return NULL;
1044 }
1045 #endif // #ifndef PRODUCT
1046
1047 oop res = _cmsSpace->promote(obj, obj_size);
1048 if (res == NULL) {
1049 // expand and retry
1050 size_t s = _cmsSpace->expansionSpaceRequired(obj_size); // HeapWords
1051 expand(s*HeapWordSize, MinHeapDeltaBytes,
1052 CMSExpansionCause::_satisfy_promotion);
1053 // Since there's currently no next generation, we don't try to promote
1054 // into a more senior generation.
1055 assert(next_gen() == NULL, "assumption, based upon which no attempt "
1056 "is made to pass on a possibly failing "
1057 "promotion to next generation");
1058 res = _cmsSpace->promote(obj, obj_size);
1059 }
1060 if (res != NULL) {
1061 // See comment in allocate() about when objects should
1062 // be allocated live.
1063 assert(obj->is_oop(), "Will dereference klass pointer below");
1064 collector()->promoted(false, // Not parallel
1065 (HeapWord*)res, obj->is_objArray(), obj_size);
1066 // promotion counters
1067 NOT_PRODUCT(
1068 _numObjectsPromoted++;
1069 _numWordsPromoted +=
1070 (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
1071 )
1072 }
2608 }
2609 }
2610
// True iff no allocation has occurred in the CMS space since the last
// save_marks(); delegates to the underlying space.
bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
  return cmsSpace()->no_allocs_since_save_marks();
}
2614
// Defines oop_since_save_marks_iterate<nv_suffix>() for a closure type:
// installs this generation on the closure, delegates the iteration to the
// CMS space, clears the closure's generation, and advances the save-marks
// point so a subsequent call only visits newer allocations.
#define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
                                                             \
void ConcurrentMarkSweepGeneration::                         \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
  cl->set_generation(this);                                  \
  cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl);   \
  cl->reset_generation();                                    \
  save_marks();                                              \
}

// Instantiate the definition above for every "since save marks" closure.
ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
2626
// Apply "cl" to references into this generation held by younger-generation
// objects; bracket the iteration by installing/clearing the closure's
// notion of the current generation.
void
ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
  cl->set_generation(this);
  younger_refs_in_space_iterate(_cmsSpace, cl);
  cl->reset_generation();
}
2633
2634 void
2635 ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) {
2636 if (freelistLock()->owned_by_self()) {
2637 Generation::oop_iterate(cl);
2638 } else {
2639 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2640 Generation::oop_iterate(cl);
2641 }
2642 }
2643
2644 void
2645 ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
2646 if (freelistLock()->owned_by_self()) {
2647 Generation::object_iterate(cl);
2648 } else {
2649 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2650 Generation::object_iterate(cl);
2651 }
2652 }
2653
2654 void
2786
#ifndef PRODUCT
// Debug-only: return the start of the block containing "p", or NULL when
// "p" lies outside the collector's span or the CMS space's reserved region.
HeapWord* CMSCollector::block_start(const void* p) const {
  const HeapWord* addr = (HeapWord*)p;
  if (_span.contains(p)) {
    if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
      return _cmsGen->cmsSpace()->block_start(p);
    }
  }
  return NULL;
}
#endif
2798
// Expand the generation enough to satisfy an allocation of "word_size"
// words, then retry the allocation under the free list lock. TLAB
// allocation is not supported here ("tlab" must be false).
HeapWord*
ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
                                                   bool   tlab,
                                                   bool   parallel) {
  CMSSynchronousYieldRequest yr;
  assert(!tlab, "Can't deal with TLAB allocation");
  MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
  expand(word_size*HeapWordSize, MinHeapDeltaBytes,
    CMSExpansionCause::_satisfy_allocation);
  if (GCExpandToAllocateDelayMillis > 0) {
    // Test hook: delay so competing threads may race us to the new space.
    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  }
  return have_lock_and_allocate(word_size, tlab);
}
2813
// YSR: All of this generation expansion/shrinking stuff is an exact copy of
// TenuredGeneration, which makes me wonder if we should move this
// to CardGeneration and share it...
// Grow the committed size by "bytes" (with a hint of at least
// "expand_bytes"); the actual policy lives in CardGeneration::expand.
bool ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes) {
  return CardGeneration::expand(bytes, expand_bytes);
}
2820
2821 void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes,
2822 CMSExpansionCause::Cause cause)
2823 {
2824
2825 bool success = expand(bytes, expand_bytes);
2826
2827 // remember why we expanded; this information is used
2828 // by shouldConcurrentCollect() when making decisions on whether to start
2829 // a new CMS cycle.
2830 if (success) {
2831 set_expansion_cause(cause);
2832 if (PrintGCDetails && Verbose) {
2833 gclog_or_tty->print_cr("Expanded CMS gen for %s",
2834 CMSExpansionCause::to_string(cause));
2835 }
2836 }
2837 }
2838
// Expand the generation (serialized on ParGCRareEvent_lock) until the
// per-thread parallel allocation buffer can satisfy "word_sz" words, or
// until no further expansion is possible (returns NULL in that case).
HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
  HeapWord* res = NULL;
  MutexLocker x(ParGCRareEvent_lock);
  while (true) {
    // Expansion by some other thread might make alloc OK now:
    res = ps->lab.alloc(word_sz);
    if (res != NULL) return res;
    // If there's not enough expansion space available, give up.
    if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
      return NULL;
    }
    // Otherwise, we try expansion.
    expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
      CMSExpansionCause::_allocate_par_lab);
    // Now go around the loop and try alloc again;
    // A competing par_promote might beat us to the expansion space,
    // so we may go around the loop again if promotion fails again.
    if (GCExpandToAllocateDelayMillis > 0) {
      // Test hook: delay to widen the race window deliberately.
      os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
    }
  }
}
2861
2862
// Expand the generation (serialized on ParGCRareEvent_lock) until "promo"
// has spooling space for promotion records, or until no further expansion
// is possible (returns false in that case).
bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
  PromotionInfo* promo) {
  MutexLocker x(ParGCRareEvent_lock);
  size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
  while (true) {
    // Expansion by some other thread might make alloc OK now:
    if (promo->ensure_spooling_space()) {
      assert(promo->has_spooling_space(),
             "Post-condition of successful ensure_spooling_space()");
      return true;
    }
    // If there's not enough expansion space available, give up.
    if (_virtual_space.uncommitted_size() < refill_size_bytes) {
      return false;
    }
    // Otherwise, we try expansion.
    expand(refill_size_bytes, MinHeapDeltaBytes,
      CMSExpansionCause::_allocate_par_spooling_space);
    // Now go around the loop and try alloc again;
    // A competing allocation might beat us to the expansion space,
    // so we may go around the loop again if allocation fails again.
    if (GCExpandToAllocateDelayMillis > 0) {
      // Test hook: delay to widen the race window deliberately.
      os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
    }
  }
}
2889
2890
// Uncommit "bytes" from the high end of the generation, keeping the
// space's end, the block offset table, and the card table consistent
// with the new committed size.
void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  // Shrink committed space
  _virtual_space.shrink_by(bytes);
  // Shrink space; this also shrinks the space's BOT
  _cmsSpace->set_end((HeapWord*) _virtual_space.high());
  size_t new_word_size = heap_word_size(_cmsSpace->capacity());
  // Shrink the shared block offset array
  _bts->resize(new_word_size);
  MemRegion mr(_cmsSpace->bottom(), new_word_size);
  // Shrink the card table
  Universe::heap()->barrier_set()->resize_covered_region(mr);

  if (Verbose && PrintGC) {
    size_t new_mem_size = _virtual_space.committed_size();
    size_t old_mem_size = new_mem_size + bytes;
    gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  name(), old_mem_size/K, new_mem_size/K);
  }
}
2911
// Shrink the generation by "bytes", rounded down to a page boundary.
void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
  assert_locked_or_safepoint(Heap_lock);
  size_t size = ReservedSpace::page_align_size_down(bytes);
  // Only shrink if a compaction was done so that all the free space
  // in the generation is in a contiguous block at the end.
  if (size > 0 && did_compact()) {
    shrink_by(size);
  }
}
2921
// Commit "bytes" more of the reserved region. On success, resize the
// block offset table and card table to cover the new region, move the
// space's end, and update the performance counters. Returns whether the
// underlying virtual-space expansion succeeded.
bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) {
  assert_locked_or_safepoint(Heap_lock);
  bool result = _virtual_space.expand_by(bytes);
  if (result) {
    size_t new_word_size =
      heap_word_size(_virtual_space.committed_size());
    MemRegion mr(_cmsSpace->bottom(), new_word_size);
    _bts->resize(new_word_size);  // resize the block offset shared array
    Universe::heap()->barrier_set()->resize_covered_region(mr);
    // Hmmmm... why doesn't CFLS::set_end verify locking?
    // This is quite ugly; FIX ME XXX
    _cmsSpace->assert_locked(freelistLock());
    _cmsSpace->set_end((HeapWord*)_virtual_space.high());

    // update the space and generation capacity counters
    if (UsePerfData) {
      _space_counters->update_capacity();
      _gen_counters->update_all();
    }

    if (Verbose && PrintGC) {
      size_t new_mem_size = _virtual_space.committed_size();
      size_t old_mem_size = new_mem_size - bytes;
      gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                    name(), old_mem_size/K, bytes/K, new_mem_size/K);
    }
  }
  return result;
}
2951
2952 bool ConcurrentMarkSweepGeneration::grow_to_reserved() {
2953 assert_locked_or_safepoint(Heap_lock);
2954 bool success = true;
2955 const size_t remaining_bytes = _virtual_space.uncommitted_size();
2956 if (remaining_bytes > 0) {
2957 success = grow_by(remaining_bytes);
2958 DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
2959 }
2960 return success;
2961 }
2962
// Shrinking of the CMS free lists is not implemented; under verbose GC
// logging this warns that the shrink request was ignored.
void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
  assert_locked_or_safepoint(Heap_lock);
  assert_lock_strong(freelistLock());
  if (PrintGCDetails && Verbose) {
    warning("Shrinking of CMS not yet implemented");
  }
  return;
}
2971
2972
2973 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent
2974 // phases.
2975 class CMSPhaseAccounting: public StackObj {
2976 public:
2977 CMSPhaseAccounting(CMSCollector *collector,
2978 const char *phase,
2979 const GCId gc_id,
2980 bool print_cr = true);
|
776 size_t av_promo = (size_t)gc_stats()->avg_promoted()->padded_average();
777 bool res = (available >= av_promo) || (available >= max_promotion_in_bytes);
778 if (Verbose && PrintGCDetails) {
779 gclog_or_tty->print_cr(
780 "CMS: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
781 "max_promo("SIZE_FORMAT")",
782 res? "":" not", available, res? ">=":"<",
783 av_promo, max_promotion_in_bytes);
784 }
785 return res;
786 }
787
// At a promotion failure dump information on block layout in heap
// (cms old generation).
void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
  // Diagnostic hook: when -XX:+CMSDumpAtPromotionFailure is set, dump the
  // CMS space's block layout (with the appropriate locks held) to the GC log.
  if (CMSDumpAtPromotionFailure) {
    cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
  }
}
795
796 void ConcurrentMarkSweepGeneration::reset_after_compaction() {
797 // Clear the promotion information. These pointers can be adjusted
798 // along with all the other pointers into the heap but
799 // compaction is expected to be a rare event with
800 // a heap using cms so don't do it without seeing the need.
801 for (uint i = 0; i < ParallelGCThreads; i++) {
802 _par_gc_thread_states[i]->promo.reset();
803 }
804 }
805
806 void ConcurrentMarkSweepGeneration::compute_new_size() {
807 assert_locked_or_safepoint(Heap_lock);
808
809 // If incremental collection failed, we just want to expand
810 // to the limit.
811 if (incremental_collection_failed()) {
812 clear_incremental_collection_failed();
813 grow_to_reserved();
814 return;
815 }
816
817 // The heap has been compacted but not reset yet.
818 // Any metric such as free() or used() will be incorrect.
819
820 CardGeneration::compute_new_size();
821
822 // Reset again after a possible resizing
823 if (did_compact()) {
824 cmsSpace()->reset_after_compaction();
825 }
856 gclog_or_tty->print_cr(" Capacity "SIZE_FORMAT, capacity()/1000);
857 gclog_or_tty->print_cr(" Desired capacity "SIZE_FORMAT,
858 desired_capacity/1000);
859 int prev_level = level() - 1;
860 if (prev_level >= 0) {
861 size_t prev_size = 0;
862 GenCollectedHeap* gch = GenCollectedHeap::heap();
863 Generation* prev_gen = gch->_gens[prev_level];
864 prev_size = prev_gen->capacity();
865 gclog_or_tty->print_cr(" Younger gen size "SIZE_FORMAT,
866 prev_size/1000);
867 }
868 gclog_or_tty->print_cr(" unsafe_max_alloc_nogc "SIZE_FORMAT,
869 unsafe_max_alloc_nogc()/1000);
870 gclog_or_tty->print_cr(" contiguous available "SIZE_FORMAT,
871 contiguous_available()/1000);
872 gclog_or_tty->print_cr(" Expand by "SIZE_FORMAT" (bytes)",
873 expand_bytes);
874 }
875 // safe if expansion fails
876 expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
877 if (PrintGCDetails && Verbose) {
878 gclog_or_tty->print_cr(" Expanded free fraction %f",
879 ((double) free()) / capacity());
880 }
881 } else {
882 size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
883 assert(desired_capacity <= capacity(), "invalid expansion size");
884 size_t shrink_bytes = capacity() - desired_capacity;
885 // Don't shrink unless the delta is greater than the minimum shrink we want
886 if (shrink_bytes >= MinHeapDeltaBytes) {
887 shrink_free_list_by(shrink_bytes);
888 }
889 }
890 }
891
// Expose the underlying free list space's lock; callers take it to
// synchronize allocation and iteration over the CMS space.
Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
  return cmsSpace()->freelistLock();
}
895
896 HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
1022 }
1023 }
1024 }
1025
1026 oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
1027 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1028 // allocate, copy and if necessary update promoinfo --
1029 // delegate to underlying space.
1030 assert_lock_strong(freelistLock());
1031
1032 #ifndef PRODUCT
1033 if (Universe::heap()->promotion_should_fail()) {
1034 return NULL;
1035 }
1036 #endif // #ifndef PRODUCT
1037
1038 oop res = _cmsSpace->promote(obj, obj_size);
1039 if (res == NULL) {
1040 // expand and retry
1041 size_t s = _cmsSpace->expansionSpaceRequired(obj_size); // HeapWords
1042 expand_for_gc_cause(s*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_promotion);
1043 // Since there's currently no next generation, we don't try to promote
1044 // into a more senior generation.
1045 assert(next_gen() == NULL, "assumption, based upon which no attempt "
1046 "is made to pass on a possibly failing "
1047 "promotion to next generation");
1048 res = _cmsSpace->promote(obj, obj_size);
1049 }
1050 if (res != NULL) {
1051 // See comment in allocate() about when objects should
1052 // be allocated live.
1053 assert(obj->is_oop(), "Will dereference klass pointer below");
1054 collector()->promoted(false, // Not parallel
1055 (HeapWord*)res, obj->is_objArray(), obj_size);
1056 // promotion counters
1057 NOT_PRODUCT(
1058 _numObjectsPromoted++;
1059 _numWordsPromoted +=
1060 (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
1061 )
1062 }
2598 }
2599 }
2600
// True iff no allocation has occurred in the CMS space since the last
// save_marks(); delegates to the underlying space.
bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
  return cmsSpace()->no_allocs_since_save_marks();
}
2604
// Defines oop_since_save_marks_iterate<nv_suffix>() for a closure type:
// installs this generation on the closure, delegates the iteration to the
// CMS space, clears the closure's generation, and advances the save-marks
// point so a subsequent call only visits newer allocations.
#define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
                                                             \
void ConcurrentMarkSweepGeneration::                         \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
  cl->set_generation(this);                                  \
  cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl);   \
  cl->reset_generation();                                    \
  save_marks();                                              \
}

// Instantiate the definition above for every "since save marks" closure.
ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
2616
2617 void
2618 ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) {
2619 if (freelistLock()->owned_by_self()) {
2620 Generation::oop_iterate(cl);
2621 } else {
2622 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2623 Generation::oop_iterate(cl);
2624 }
2625 }
2626
2627 void
2628 ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
2629 if (freelistLock()->owned_by_self()) {
2630 Generation::object_iterate(cl);
2631 } else {
2632 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2633 Generation::object_iterate(cl);
2634 }
2635 }
2636
2637 void
2769
#ifndef PRODUCT
// Debug-only: return the start of the block containing "p", or NULL when
// "p" lies outside the collector's span or the CMS space's reserved region.
HeapWord* CMSCollector::block_start(const void* p) const {
  const HeapWord* addr = (HeapWord*)p;
  if (_span.contains(p)) {
    if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
      return _cmsGen->cmsSpace()->block_start(p);
    }
  }
  return NULL;
}
#endif
2781
// Expand the generation enough to satisfy an allocation of "word_size"
// words, then retry the allocation under the free list lock. TLAB
// allocation is not supported here ("tlab" must be false).
HeapWord*
ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
                                                   bool   tlab,
                                                   bool   parallel) {
  CMSSynchronousYieldRequest yr;
  assert(!tlab, "Can't deal with TLAB allocation");
  MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
  expand_for_gc_cause(word_size*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_allocation);
  if (GCExpandToAllocateDelayMillis > 0) {
    // Test hook: delay so competing threads may race us to the new space.
    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  }
  return have_lock_and_allocate(word_size, tlab);
}
2795
2796 void ConcurrentMarkSweepGeneration::expand_for_gc_cause(
2797 size_t bytes,
2798 size_t expand_bytes,
2799 CMSExpansionCause::Cause cause)
2800 {
2801
2802 bool success = expand(bytes, expand_bytes);
2803
2804 // remember why we expanded; this information is used
2805 // by shouldConcurrentCollect() when making decisions on whether to start
2806 // a new CMS cycle.
2807 if (success) {
2808 set_expansion_cause(cause);
2809 if (PrintGCDetails && Verbose) {
2810 gclog_or_tty->print_cr("Expanded CMS gen for %s",
2811 CMSExpansionCause::to_string(cause));
2812 }
2813 }
2814 }
2815
// Expand the generation (serialized on ParGCRareEvent_lock) until the
// per-thread parallel allocation buffer can satisfy "word_sz" words, or
// until no further expansion is possible (returns NULL in that case).
HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
  HeapWord* res = NULL;
  MutexLocker x(ParGCRareEvent_lock);
  while (true) {
    // Expansion by some other thread might make alloc OK now:
    res = ps->lab.alloc(word_sz);
    if (res != NULL) return res;
    // If there's not enough expansion space available, give up.
    if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
      return NULL;
    }
    // Otherwise, we try expansion.
    expand_for_gc_cause(word_sz*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_lab);
    // Now go around the loop and try alloc again;
    // A competing par_promote might beat us to the expansion space,
    // so we may go around the loop again if promotion fails again.
    if (GCExpandToAllocateDelayMillis > 0) {
      // Test hook: delay to widen the race window deliberately.
      os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
    }
  }
}
2837
2838
// Expand the generation (serialized on ParGCRareEvent_lock) until "promo"
// has spooling space for promotion records, or until no further expansion
// is possible (returns false in that case).
bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
  PromotionInfo* promo) {
  MutexLocker x(ParGCRareEvent_lock);
  size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
  while (true) {
    // Expansion by some other thread might make alloc OK now:
    if (promo->ensure_spooling_space()) {
      assert(promo->has_spooling_space(),
             "Post-condition of successful ensure_spooling_space()");
      return true;
    }
    // If there's not enough expansion space available, give up.
    if (_virtual_space.uncommitted_size() < refill_size_bytes) {
      return false;
    }
    // Otherwise, we try expansion.
    expand_for_gc_cause(refill_size_bytes, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_spooling_space);
    // Now go around the loop and try alloc again;
    // A competing allocation might beat us to the expansion space,
    // so we may go around the loop again if allocation fails again.
    if (GCExpandToAllocateDelayMillis > 0) {
      // Test hook: delay to widen the race window deliberately.
      os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
    }
  }
}
2864
2865 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
2866 // Only shrink if a compaction was done so that all the free space
2867 // in the generation is in a contiguous block at the end.
2868 if (did_compact()) {
2869 CardGeneration::shrink(bytes);
2870 }
2871 }
2872
// Size changes (grow/shrink) for this generation require the Heap_lock or
// a safepoint; invoked by CardGeneration's shared resizing code.
void ConcurrentMarkSweepGeneration::assert_correct_size_change_locking() {
  assert_locked_or_safepoint(Heap_lock);
}
2876
// Shrinking of the CMS free lists is not implemented; under verbose GC
// logging this warns that the shrink request was ignored.
void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
  assert_locked_or_safepoint(Heap_lock);
  assert_lock_strong(freelistLock());
  if (PrintGCDetails && Verbose) {
    warning("Shrinking of CMS not yet implemented");
  }
  return;
}
2885
2886
2887 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent
2888 // phases.
2889 class CMSPhaseAccounting: public StackObj {
2890 public:
2891 CMSPhaseAccounting(CMSCollector *collector,
2892 const char *phase,
2893 const GCId gc_id,
2894 bool print_cr = true);
|