/* NOTE(review): this chunk is a flattened two-column (side-by-side) diff dump of
   ConcurrentMarkSweepGeneration methods. The embedded 30xx numbers are the file's
   own line numbers, and the '|' characters mark the boundary between the two diff
   columns. The right-hand column matches the left except that
   object_iterate_since_last_GC is absent (removed in the newer revision).
   Both columns are truncated mid-body in oop_iterate(ExtendedOopClosure*), and the
   chunk opens with closing braces of a function that starts before this view.
   All original tokens are preserved byte-for-byte; only comments have been added. */
3040 } 3041 } 3042 3043 /* Query: true iff the underlying CMS space has seen no allocations since the last save_marks(); pure delegation to cmsSpace(). */ bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() { 3044 return cmsSpace()->no_allocs_since_save_marks(); 3045 } 3046 3047 /* Macro instantiated (via ALL_SINCE_SAVE_MARKS_CLOSURES below) once per closure type: tags the closure with this generation, walks objects allocated in the CMS space since save_marks, untags the closure, then advances the save-marks watermark. */ #define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \ 3048 \ 3049 void ConcurrentMarkSweepGeneration:: \ 3050 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \ 3051 cl->set_generation(this); \ 3052 cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl); \ 3053 cl->reset_generation(); \ 3054 save_marks(); \ 3055 } 3056 3057 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN) 3058 3059 /* Allocation-profiler style iterator; the authors' discussion below questions its design. Present only in the left (old) column of this diff. Delegates to the space's iterator at 3076. */ void 3060 ConcurrentMarkSweepGeneration::object_iterate_since_last_GC(ObjectClosure* blk) 3061 { 3062 // Not currently implemented; need to do the following. -- ysr. 3063 // dld -- I think that is used for some sort of allocation profiler. So it 3064 // really means the objects allocated by the mutator since the last 3065 // GC. We could potentially implement this cheaply by recording only 3066 // the direct allocations in a side data structure. 3067 // 3068 // I think we probably ought not to be required to support these 3069 // iterations at any arbitrary point; I think there ought to be some 3070 // call to enable/disable allocation profiling in a generation/space, 3071 // and the iterator ought to return the objects allocated in the 3072 // gen/space since the enable call, or the last iterator call (which 3073 // will probably be at a GC.) That way, for gens like CM&S that would 3074 // require some extra data structure to support this, we only pay the 3075 // cost when it's in use...
3076 cmsSpace()->object_iterate_since_last_GC(blk); 3077 } 3078 /* Applies cl to younger-generation references into this generation's space, bracketing the walk with set_generation/reset_generation on the closure. */ 3079 void 3080 ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) { 3081 cl->set_generation(this); 3082 younger_refs_in_space_iterate(_cmsSpace, cl); 3083 cl->reset_generation(); 3084 } 3085 /* Oop-iteration over a MemRegion: if the caller already holds the free-list lock, delegate directly; otherwise acquire it (without a safepoint check) for the duration. Same Generation::oop_iterate is called on both paths. */ 3086 void 3087 ConcurrentMarkSweepGeneration::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) { 3088 if (freelistLock()->owned_by_self()) { 3089 Generation::oop_iterate(mr, cl); 3090 } else { 3091 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag); 3092 Generation::oop_iterate(mr, cl); 3093 } 3094 } 3095 /* Whole-generation overload; appears to follow the same locking protocol, but its body is truncated at the column boundary below. */ 3096 void 3097 ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) { 3098 if (freelistLock()->owned_by_self()) { 3099 Generation::oop_iterate(cl); | /* ---- right-hand diff column begins here: identical to the left column except that object_iterate_since_last_GC has been removed (note the shifted line numbers from 3059 on). ---- */ 3040 } 3041 } 3042 3043 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() { 3044 return cmsSpace()->no_allocs_since_save_marks(); 3045 } 3046 3047 #define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \ 3048 \ 3049 void ConcurrentMarkSweepGeneration:: \ 3050 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \ 3051 cl->set_generation(this); \ 3052 cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl); \ 3053 cl->reset_generation(); \ 3054 save_marks(); \ 3055 } 3056 3057 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN) 3058 3059 void 3060 ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) { 3061 cl->set_generation(this); 3062 younger_refs_in_space_iterate(_cmsSpace, cl); 3063 cl->reset_generation(); 3064 } 3065 3066 void 3067 ConcurrentMarkSweepGeneration::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) { 3068 if (freelistLock()->owned_by_self()) { 3069 Generation::oop_iterate(mr, cl); 3070 } else { 3071 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag); 3072 Generation::oop_iterate(mr, cl); 3073 } 3074 } 3075 3076 void 3077 ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) { 3078 if (freelistLock()->owned_by_self())
{ 3079 Generation::oop_iterate(cl); | /* chunk ends here, truncated mid-function in the right column's oop_iterate(ExtendedOopClosure*) */