< prev index next >

src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp

Print this page
rev 8070 : imported patch gencollected_heap_cleanup


 282 
 283 void CMSCollector::ref_processor_init() {
 284   if (_ref_processor == NULL) {
 285     // Lazily allocate and initialize a reference processor (at most once)
 286     _ref_processor =
 287       new ReferenceProcessor(_span,                               // span
 288                              (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
 289                              (int) ParallelGCThreads,             // mt processing degree
 290                              _cmsGen->refs_discovery_is_mt(),     // mt discovery
 291                              (int) MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
 292                              _cmsGen->refs_discovery_is_atomic(), // whether discovery is atomic
 293                              &_is_alive_closure);                 // closure for liveness info
 294     // Cache the reference processor in the CMS generation as well
 295     _cmsGen->set_ref_processor(_ref_processor);
 296 
 297   }
 298 }
 299 
 300 AdaptiveSizePolicy* CMSCollector::size_policy() {
       // The adaptive size policy is owned by the heap's generation policy;
       // fetch it on each call rather than caching a copy here.
 301   GenCollectedHeap* gch = GenCollectedHeap::heap();
 302   assert(gch->kind() == CollectedHeap::GenCollectedHeap,
 303     "Wrong type of heap");
 304   return gch->gen_policy()->size_policy();
 305 }
 306 
 307 void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
 308 
 309   const char* gen_name = "old";
 310   GenCollectorPolicy* gcp = (GenCollectorPolicy*) GenCollectedHeap::heap()->collector_policy();
 311 
 312   // Generation Counters - generation 1, 1 subspace; min/max sized from the collector policy
 313   _gen_counters = new GenerationCounters(gen_name, 1, 1,
 314       gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);
 315 
       // Performance counters for the (single) space backing this generation
 316   _space_counters = new GSpaceCounters(gen_name, 0,
 317                                        _virtual_space.reserved_size(),
 318                                        this, _gen_counters);
 319 }
 320 
 321 CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
 322   _cms_gen(cms_gen)
 323 {


 964           _modUnionTable.mark_range(mr);
 965         }
 966       } else {  // not an obj array; we can just mark the head
 967         if (par) {
 968           _modUnionTable.par_mark(start);
 969         } else {
 970           _modUnionTable.mark(start);
 971         }
 972       }
 973     }
 974   }
 975 }
 976 
 977 oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
 978   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
 979   // allocate, copy and if necessary update promoinfo --
 980   // delegate to underlying space.
 981   assert_lock_strong(freelistLock());
 982 
 983 #ifndef PRODUCT
 984   if (Universe::heap()->promotion_should_fail()) {
 985     return NULL;
 986   }
 987 #endif  // #ifndef PRODUCT
 988 
 989   oop res = _cmsSpace->promote(obj, obj_size);
 990   if (res == NULL) {
 991     // expand and retry
 992     size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
 993     expand_for_gc_cause(s*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_promotion);
 994     // Since this is the old generation, we don't try to promote
 995     // into a more senior generation.
 996     res = _cmsSpace->promote(obj, obj_size);
 997   }
 998   if (res != NULL) {
 999     // See comment in allocate() about when objects should
1000     // be allocated live.
1001     assert(obj->is_oop(), "Will dereference klass pointer below");
1002     collector()->promoted(false,           // Not parallel
1003                           (HeapWord*)res, obj->is_objArray(), obj_size);
1004     // promotion counters


1041 // STATE TRANSITION DIAGRAM
1042 //
1043 //        mut / parnew                     mut  /  parnew
1044 // FREE --------------------> TRANSIENT ---------------------> OBJECT --|
1045 //  ^                                                                   |
1046 //  |------------------------ DEAD <------------------------------------|
1047 //         sweep                            mut
1048 //
1049 // While a block is in TRANSIENT state its size cannot be determined
1050 // so readers will either need to come back later or stall until
1051 // the size can be determined. Note that for the case of direct
1052 // allocation, P-bits, when available, may be used to determine the
1053 // size of an object that may not yet have been initialized.
1054 
1055 // Things to support parallel young-gen collection.
1056 oop
1057 ConcurrentMarkSweepGeneration::par_promote(int thread_num,
1058                                            oop old, markOop m,
1059                                            size_t word_sz) {
1060 #ifndef PRODUCT
1061   if (Universe::heap()->promotion_should_fail()) {
1062     return NULL;
1063   }
1064 #endif  // #ifndef PRODUCT
1065 
1066   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1067   PromotionInfo* promoInfo = &ps->promo;
1068   // if we are tracking promotions, then first ensure space for
1069   // promotion (including spooling space for saving header if necessary).
1070   // then allocate and copy, then track promoted info if needed.
1071   // When tracking (see PromotionInfo::track()), the mark word may
1072   // be displaced and in this case restoration of the mark word
1073   // occurs in the (oop_since_save_marks_)iterate phase.
1074   if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
1075     // Out of space for allocating spooling buffers;
1076     // try expanding and allocating spooling buffers.
1077     if (!expand_and_ensure_spooling_space(promoInfo)) {
1078       return NULL;
1079     }
1080   }
1081   assert(promoInfo->has_spooling_space(), "Control point invariant");


2451   assert(_restart_addr == NULL, "Expected pre-condition");
2452   verification_mark_bm()->iterate(&markFromRootsClosure);
2453   while (_restart_addr != NULL) {
2454     // Deal with stack overflow: by restarting at the indicated
2455     // address.
2456     HeapWord* ra = _restart_addr;
2457     markFromRootsClosure.reset(ra);
2458     _restart_addr = NULL;
2459     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2460   }
2461   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2462   verify_work_stacks_empty();
2463 
2464   // Marking completed -- now verify that each bit marked in
2465   // verification_mark_bm() is also marked in markBitMap(); flag all
2466   // errors by printing corresponding objects.
2467   VerifyMarkedClosure vcl(markBitMap());
2468   verification_mark_bm()->iterate(&vcl);
2469   if (vcl.failed()) {
2470     gclog_or_tty->print("Verification failed");
2471     Universe::heap()->print_on(gclog_or_tty);
2472     fatal("CMS: failed marking verification after remark");
2473   }
2474 }
2475 
 2476 class VerifyKlassOopsKlassClosure : public KlassClosure {
        // Checks that every non-NULL oop embedded in a visited Klass is
        // marked in the supplied CMS bitmap.  Narrow oops are not expected
        // when walking Klass-embedded references (see ShouldNotReachHere).
 2477   class VerifyKlassOopsClosure : public OopClosure {
 2478     CMSBitMap* _bitmap;
 2479    public:
 2480     VerifyKlassOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
 2481     void do_oop(oop* p)       { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
 2482     void do_oop(narrowOop* p) { ShouldNotReachHere(); }
 2483   } _oop_closure;
 2484  public:
 2485   VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
 2486   void do_klass(Klass* k) {
 2487     k->oops_do(&_oop_closure);
 2488   }
 2489 };
2490 
2491 void CMSCollector::verify_after_remark_work_2() {




 282 
 283 void CMSCollector::ref_processor_init() {
 284   if (_ref_processor == NULL) {
 285     // Lazily allocate and initialize a reference processor (at most once)
 286     _ref_processor =
 287       new ReferenceProcessor(_span,                               // span
 288                              (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
 289                              (int) ParallelGCThreads,             // mt processing degree
 290                              _cmsGen->refs_discovery_is_mt(),     // mt discovery
 291                              (int) MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
 292                              _cmsGen->refs_discovery_is_atomic(), // whether discovery is atomic
 293                              &_is_alive_closure);                 // closure for liveness info
 294     // Cache the reference processor in the CMS generation as well
 295     _cmsGen->set_ref_processor(_ref_processor);
 296 
 297   }
 298 }
 299 
 300 AdaptiveSizePolicy* CMSCollector::size_policy() {
       // NOTE(review): the explicit heap-kind assert was dropped by the
       // gencollected_heap_cleanup patch — presumably GenCollectedHeap::heap()
       // now validates the heap kind itself; confirm against that accessor.
 301   GenCollectedHeap* gch = GenCollectedHeap::heap();


 302   return gch->gen_policy()->size_policy();
 303 }
 304 
 305 void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
 306 
 307   const char* gen_name = "old";
 308   GenCollectorPolicy* gcp = (GenCollectorPolicy*) GenCollectedHeap::heap()->collector_policy();
 309 
 310   // Generation Counters - generation 1, 1 subspace; min/max sized from the collector policy
 311   _gen_counters = new GenerationCounters(gen_name, 1, 1,
 312       gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);
 313 
       // Performance counters for the (single) space backing this generation
 314   _space_counters = new GSpaceCounters(gen_name, 0,
 315                                        _virtual_space.reserved_size(),
 316                                        this, _gen_counters);
 317 }
 318 
 319 CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
 320   _cms_gen(cms_gen)
 321 {


 962           _modUnionTable.mark_range(mr);
 963         }
 964       } else {  // not an obj array; we can just mark the head
 965         if (par) {
 966           _modUnionTable.par_mark(start);
 967         } else {
 968           _modUnionTable.mark(start);
 969         }
 970       }
 971     }
 972   }
 973 }
 974 
 975 oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
 976   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
 977   // allocate, copy and if necessary update promoinfo --
 978   // delegate to underlying space.
 979   assert_lock_strong(freelistLock());
 980 
 981 #ifndef PRODUCT
 982   if (GenCollectedHeap::heap()->promotion_should_fail()) {
 983     return NULL;
 984   }
 985 #endif  // #ifndef PRODUCT
 986 
 987   oop res = _cmsSpace->promote(obj, obj_size);
 988   if (res == NULL) {
 989     // expand and retry
 990     size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
 991     expand_for_gc_cause(s*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_promotion);
 992     // Since this is the old generation, we don't try to promote
 993     // into a more senior generation.
 994     res = _cmsSpace->promote(obj, obj_size);
 995   }
 996   if (res != NULL) {
 997     // See comment in allocate() about when objects should
 998     // be allocated live.
 999     assert(obj->is_oop(), "Will dereference klass pointer below");
1000     collector()->promoted(false,           // Not parallel
1001                           (HeapWord*)res, obj->is_objArray(), obj_size);
1002     // promotion counters


1039 // STATE TRANSITION DIAGRAM
1040 //
1041 //        mut / parnew                     mut  /  parnew
1042 // FREE --------------------> TRANSIENT ---------------------> OBJECT --|
1043 //  ^                                                                   |
1044 //  |------------------------ DEAD <------------------------------------|
1045 //         sweep                            mut
1046 //
1047 // While a block is in TRANSIENT state its size cannot be determined
1048 // so readers will either need to come back later or stall until
1049 // the size can be determined. Note that for the case of direct
1050 // allocation, P-bits, when available, may be used to determine the
1051 // size of an object that may not yet have been initialized.
1052 
1053 // Things to support parallel young-gen collection.
1054 oop
1055 ConcurrentMarkSweepGeneration::par_promote(int thread_num,
1056                                            oop old, markOop m,
1057                                            size_t word_sz) {
1058 #ifndef PRODUCT
1059   if (GenCollectedHeap::heap()->promotion_should_fail()) {
1060     return NULL;
1061   }
1062 #endif  // #ifndef PRODUCT
1063 
1064   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1065   PromotionInfo* promoInfo = &ps->promo;
1066   // if we are tracking promotions, then first ensure space for
1067   // promotion (including spooling space for saving header if necessary).
1068   // then allocate and copy, then track promoted info if needed.
1069   // When tracking (see PromotionInfo::track()), the mark word may
1070   // be displaced and in this case restoration of the mark word
1071   // occurs in the (oop_since_save_marks_)iterate phase.
1072   if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
1073     // Out of space for allocating spooling buffers;
1074     // try expanding and allocating spooling buffers.
1075     if (!expand_and_ensure_spooling_space(promoInfo)) {
1076       return NULL;
1077     }
1078   }
1079   assert(promoInfo->has_spooling_space(), "Control point invariant");


2449   assert(_restart_addr == NULL, "Expected pre-condition");
2450   verification_mark_bm()->iterate(&markFromRootsClosure);
2451   while (_restart_addr != NULL) {
2452     // Deal with stack overflow: by restarting at the indicated
2453     // address.
2454     HeapWord* ra = _restart_addr;
2455     markFromRootsClosure.reset(ra);
2456     _restart_addr = NULL;
2457     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2458   }
2459   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2460   verify_work_stacks_empty();
2461 
2462   // Marking completed -- now verify that each bit marked in
2463   // verification_mark_bm() is also marked in markBitMap(); flag all
2464   // errors by printing corresponding objects.
2465   VerifyMarkedClosure vcl(markBitMap());
2466   verification_mark_bm()->iterate(&vcl);
2467   if (vcl.failed()) {
2468     gclog_or_tty->print("Verification failed");
2469     gch->print_on(gclog_or_tty);
2470     fatal("CMS: failed marking verification after remark");
2471   }
2472 }
2473 
 2474 class VerifyKlassOopsKlassClosure : public KlassClosure {
        // Checks that every non-NULL oop embedded in a visited Klass is
        // marked in the supplied CMS bitmap.  Narrow oops are not expected
        // when walking Klass-embedded references (see ShouldNotReachHere).
 2475   class VerifyKlassOopsClosure : public OopClosure {
 2476     CMSBitMap* _bitmap;
 2477    public:
 2478     VerifyKlassOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
 2479     void do_oop(oop* p)       { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
 2480     void do_oop(narrowOop* p) { ShouldNotReachHere(); }
 2481   } _oop_closure;
 2482  public:
 2483   VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
 2484   void do_klass(Klass* k) {
 2485     k->oops_do(&_oop_closure);
 2486   }
 2487 };
2488 
2489 void CMSCollector::verify_after_remark_work_2() {


< prev index next >