hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp

Print this page
rev 611 : Merge
   1 #ifdef USE_PRAGMA_IDENT_HDR
   2 #pragma ident "@(#)concurrentMarkSweepGeneration.hpp    1.163 08/09/25 13:47:54 JVM"
   3 #endif
   4 /*
   5  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
   6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   7  *
   8  * This code is free software; you can redistribute it and/or modify it
   9  * under the terms of the GNU General Public License version 2 only, as
  10  * published by the Free Software Foundation.
  11  *
  12  * This code is distributed in the hope that it will be useful, but WITHOUT
  13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  15  * version 2 for more details (a copy is included in the LICENSE file that
  16  * accompanied this code).
  17  *
  18  * You should have received a copy of the GNU General Public License version
  19  * 2 along with this work; if not, write to the Free Software Foundation,
  20  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  21  *
  22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  23  * CA 95054 USA or visit www.sun.com if you need additional information or
  24  * have any questions.
  25  *  


 436 // ReferenceProcessor class.
 437 // For objects in the CMS generation, this closure checks
 438 // if the object is "live" (reachable). Used in weak
 439 // reference processing.
 440 class CMSIsAliveClosure: public BoolObjectClosure {
       // Heap span this closure applies to; presumably objects are tested
       // for membership in this span by do_object_b() -- body not visible
       // here, confirm in the .cpp file.
 441   const MemRegion  _span;
       // Bit map consulted for liveness; kept const -- this closure only
       // reads it.
 442   const CMSBitMap* _bit_map;
 443 
 444   friend class CMSCollector;
 445  public:
       // Constructor requires a non-empty span; an empty span would make
       // any containment-based liveness test vacuous.
 446   CMSIsAliveClosure(MemRegion span,
 447                     CMSBitMap* bit_map):
 448     _span(span),
 449     _bit_map(bit_map) {
 450       assert(!span.is_empty(), "Empty span could spell trouble");
 451     }
 452 
       // Deliberately unusable: this closure is only meant to be queried
       // via do_object_b(), never iterated with do_object().
 453   void do_object(oop obj) {
 454     assert(false, "not to be invoked");
 455   }

       // Returns whether obj is considered live; defined out of line.
 456   bool do_object_b(oop obj);
 457 };
 458 
 459 
 460 // Implements AbstractRefProcTaskExecutor for CMS.
 461 class CMSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
 462 public:
 463 
       // Retains a reference to the collector; lifetime of the executor
       // must not exceed that of the collector it wraps.
 464   CMSRefProcTaskExecutor(CMSCollector& collector)
 465     : _collector(collector)
 466   { }
 467   
 468   // Executes a task using worker threads.  
 469   virtual void execute(ProcessTask& task);
       // Same, for the reference-enqueueing phase of ref processing.
 470   virtual void execute(EnqueueTask& task);
 471 private:
 472   CMSCollector& _collector;
 473 };
 474 
 475 


 519   // The following array-pair keeps track of mark words
 520   // displaced for accommodating overflow list above.
 521   // This code will likely be revisited under RFE#4922830.
 522   GrowableArray<oop>*     _preserved_oop_stack; 
 523   GrowableArray<markOop>* _preserved_mark_stack; 
 524 
 525   int*             _hash_seed;
 526 
 527   // In support of multi-threaded concurrent phases
 528   YieldingFlexibleWorkGang* _conc_workers;
 529 
 530   // Performance Counters
 531   CollectorCounters* _gc_counters;
 532 
 533   // Initialization Errors
 534   bool _completed_initialization;
 535 
 536   // In support of ExplicitGCInvokesConcurrent
 537   static   bool _full_gc_requested;
 538   unsigned int  _collection_count_start;

 539   // Should we unload classes this concurrent cycle?
 540   // Set in response to a concurrent full gc request.
 541   bool _unload_classes;
 542   bool _unloaded_classes_last_cycle;


 543   // Did we (allow) unload classes in the previous concurrent cycle?
 544   bool cms_unloaded_classes_last_cycle() const {
 545     return _unloaded_classes_last_cycle || CMSClassUnloadingEnabled;
 546   }
 547 
 548   // Verification support
 549   CMSBitMap     _verification_mark_bm;
 550   void verify_after_remark_work_1();
 551   void verify_after_remark_work_2();
 552 
 553   // true if any verification flag is on.
 554   bool _verifying;
 555   bool verifying() const { return _verifying; }
 556   void set_verifying(bool v) { _verifying = v; }
 557 
 558   // Collector policy
 559   ConcurrentMarkSweepPolicy* _collector_policy;
 560   ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }
 561 
 562   // Check whether the gc time limit has been 
 563   // exceeded and set the size policy flag
 564   // appropriately.
 565   void check_gc_time_limit();


 574   CardTableRS*                   _ct;      // card table
 575 
 576   // CMS marking support structures
 577   CMSBitMap     _markBitMap;
 578   CMSBitMap     _modUnionTable;
 579   CMSMarkStack  _markStack;
 580   CMSMarkStack  _revisitStack;            // used to keep track of klassKlass objects
 581                                           // to revisit
 582   CMSBitMap     _perm_gen_verify_bit_map; // Mark bit map for perm gen verification support.
 583 
 584   HeapWord*     _restart_addr; // in support of marking stack overflow
 585   void          lower_restart_addr(HeapWord* low);
 586 
 587   // Counters in support of marking stack / work queue overflow handling:
 588   // a non-zero value indicates certain types of overflow events during
 589   // the current CMS cycle and could lead to stack resizing efforts at
 590   // an opportune future time.
 591   size_t        _ser_pmc_preclean_ovflw;
 592   size_t        _ser_pmc_remark_ovflw;
 593   size_t        _par_pmc_remark_ovflw;

 594   size_t        _ser_kac_ovflw;
 595   size_t        _par_kac_ovflw;
 596   NOT_PRODUCT(size_t _num_par_pushes;)
 597 
 598   // ("Weak") Reference processing support
 599   ReferenceProcessor*            _ref_processor;
 600   CMSIsAliveClosure              _is_alive_closure;
 601       // keep this textually after _markBitMap and _span; c'tor dependency
 602 
 603   ConcurrentMarkSweepThread*     _cmsThread;   // the thread doing the work
 604   ModUnionClosure    _modUnionClosure;
 605   ModUnionClosurePar _modUnionClosurePar;
 606 
 607   // CMS abstract state machine
 608   // initial_state: Idling
 609   // next_state(Idling)            = {Marking}
 610   // next_state(Marking)           = {Precleaning, Sweeping}
 611   // next_state(Precleaning)       = {AbortablePreclean, FinalMarking}
 612   // next_state(AbortablePreclean) = {FinalMarking}
 613   // next_state(FinalMarking)      = {Sweeping}


 635   bool _between_prologue_and_epilogue;
 636 
 637   // Signalling/State related to coordination between fore- and background GC
 638   // Note: When the baton has been passed from background GC to foreground GC,
 639   // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
 640   static bool _foregroundGCIsActive;    // true iff foreground collector is active or
 641                                  // wants to go active
 642   static bool _foregroundGCShouldWait;  // true iff background GC is active and has not
 643                                  // yet passed the baton to the foreground GC
 644 
 645   // Support for CMSScheduleRemark (abortable preclean)
 646   bool _abort_preclean;
 647   bool _start_sampling;
 648 
 649   int    _numYields;
 650   size_t _numDirtyCards;
 651   uint   _sweepCount;
 652   // number of full gc's since the last concurrent gc.
 653   uint   _full_gcs_since_conc_gc;
 654 
 655   // if occupancy exceeds this, start a new gc cycle
 656   double _initiatingOccupancy;
 657   // occupancy used for bootstrapping stats
 658   double _bootstrap_occupancy;
 659 
 660   // timer
 661   elapsedTimer _timer;
 662 
 663   // Timing, allocation and promotion statistics, used for scheduling.
 664   CMSStats      _stats;
 665 
 666   // Allocation limits installed in the young gen, used only in
 667   // CMSIncrementalMode.  When an allocation in the young gen would cross one of
 668   // these limits, the cms generation is notified and the cms thread is started
 669   // or stopped, respectively.
 670   HeapWord*     _icms_start_limit;
 671   HeapWord*     _icms_stop_limit;
 672 
 673   enum CMS_op_type {
 674     CMS_op_checkpointRootsInitial,
 675     CMS_op_checkpointRootsFinal
 676   };


 809   // allocation limits in the young gen.
 810   void icms_update_allocation_limits();
 811 
 812   size_t block_size_using_printezis_bits(HeapWord* addr) const;
 813   size_t block_size_if_printezis_bits(HeapWord* addr) const;
 814   HeapWord* next_card_start_after_block(HeapWord* addr) const;
 815 
 816   void setup_cms_unloading_and_verification_state();
 817  public:
 818   CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
 819                ConcurrentMarkSweepGeneration* permGen,
 820                CardTableRS*                   ct,
 821                ConcurrentMarkSweepPolicy*     cp);
 822   ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }
 823 
 824   ReferenceProcessor* ref_processor() { return _ref_processor; }
 825   void ref_processor_init();
 826 
 827   Mutex* bitMapLock()        const { return _markBitMap.lock();    }
 828   static CollectorState abstract_state() { return _collectorState;  }
 829   double initiatingOccupancy() const { return _initiatingOccupancy; }
 830 
 831   bool should_abort_preclean() const; // Whether preclean should be aborted.
 832   size_t get_eden_used() const;
 833   size_t get_eden_capacity() const;
 834 
 835   ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }
 836 
 837   // locking checks
 838   NOT_PRODUCT(static bool have_cms_token();)
 839 
 840   // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
 841   bool shouldConcurrentCollect();
 842 
 843   void collect(bool   full,
 844                bool   clear_all_soft_refs,
 845                size_t size,
 846                bool   tlab);
 847   void collect_in_background(bool clear_all_soft_refs);
 848   void collect_in_foreground(bool clear_all_soft_refs);
 849 
 850   // In support of ExplicitGCInvokesConcurrent
 851   static void request_full_gc(unsigned int full_gc_count);
 852   // Should we unload classes in a particular concurrent cycle?
      // True iff classes should be unloaded during this concurrent
      // cycle: either a concurrent full-gc request asked for it
      // (_unload_classes) or CMSClassUnloadingEnabled is set globally.
 853   bool cms_should_unload_classes() const {
      // Invariant: _unload_classes may only be set when
      // ExplicitGCInvokesConcurrentAndUnloadsClasses is on (CR 6541037).
 854     assert(!_unload_classes ||  ExplicitGCInvokesConcurrentAndUnloadsClasses,
 855            "Inconsistency; see CR 6541037");
 856     return _unload_classes || CMSClassUnloadingEnabled;
 857   }

 858 
 859   void direct_allocated(HeapWord* start, size_t size);
 860 
 861   // Object is dead if not marked and current phase is sweeping.
 862   bool is_dead_obj(oop obj) const;
 863 
 864   // After a promotion (of "start"), do any necessary marking.
 865   // If "par", then it's being done by a parallel GC thread.
 866   // The last two args indicate if we need precise marking
 867   // and if so the size of the object so it can be dirtied
 868   // in its entirety.
 869   void promoted(bool par, HeapWord* start,
 870                 bool is_obj_array, size_t obj_size);
 871 
 872   HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
 873                                      size_t word_size);
 874 
 875   void getFreelistLocks() const;
 876   void releaseFreelistLocks() const;
 877   bool haveFreelistLocks() const;


1006   // Non-product stat counters
1007   NOT_PRODUCT(
1008     int _numObjectsPromoted;
1009     int _numWordsPromoted;
1010     int _numObjectsAllocated;
1011     int _numWordsAllocated;
1012   )
1013 
1014   // Used for sizing decisions
1015   bool _incremental_collection_failed;
1016   bool incremental_collection_failed() {
1017     return _incremental_collection_failed;
1018   }
1019   void set_incremental_collection_failed() {
1020     _incremental_collection_failed = true;
1021   }
1022   void clear_incremental_collection_failed() {
1023     _incremental_collection_failed = false;
1024   }
1025 




1026  private:
1027   // For parallel young-gen GC support.
1028   CMSParGCThreadState** _par_gc_thread_states;
1029 
1030   // Reason generation was expanded
1031   CMSExpansionCause::Cause _expansion_cause;
1032 
1033   // accessors
1034   void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
1035   CMSExpansionCause::Cause expansion_cause() { return _expansion_cause; }
1036 
1037   // In support of MinChunkSize being larger than min object size
1038   const double _dilatation_factor;
1039 
1040   enum CollectionTypes {
1041     Concurrent_collection_type          = 0,
1042     MS_foreground_collection_type       = 1,
1043     MSC_foreground_collection_type      = 2,
1044     Unknown_collection_type             = 3
1045   };
1046 
1047   CollectionTypes _debug_collection_type;
1048 




1049  protected:
1050   // Grow generation by specified size (returns false if unable to grow)
1051   bool grow_by(size_t bytes);
1052   // Grow generation to reserved size.
1053   bool grow_to_reserved();
1054   // Shrink generation by specified size (returns false if unable to shrink)
1055   virtual void shrink_by(size_t bytes);
1056 
1057   // Update statistics for GC
1058   virtual void update_gc_stats(int level, bool full);
1059 
1060   // Maximum available space in the generation (including uncommitted)
1061   // space.
1062   size_t max_available() const;
1063 




1064  public:
1065   ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
1066                                 int level, CardTableRS* ct,
1067                                 bool use_adaptive_freelists,
1068                                 FreeBlockDictionary::DictionaryChoice);
1069 
1070   // Accessors
1071   CMSCollector* collector() const { return _collector; }
1072   static void set_collector(CMSCollector* collector) {
1073     assert(_collector == NULL, "already set");
1074     _collector = collector;
1075   }
1076   CompactibleFreeListSpace*  cmsSpace() const { return _cmsSpace;  }
1077   
1078   Mutex* freelistLock() const;
1079 
1080   virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }
1081 
1082   // Adaptive size policy
1083   CMSAdaptiveSizePolicy* size_policy();
1084 
1085   bool refs_discovery_is_atomic() const { return false; }
1086   bool refs_discovery_is_mt()     const {
1087     // Note: CMS does MT-discovery during the parallel-remark
1088     // phases. Use ReferenceProcessorMTMutator to make refs
1089     // discovery MT-safe during such phases or other parallel
1090     // discovery phases in the future. This may all go away
1091     // if/when we decide that refs discovery is sufficiently
1092     // rare that the cost of the CAS's involved is in the
1093     // noise. That's a measurement that should be done, and
1094     // the code simplified if that turns out to be the case.
1095     return false;
1096   }
1097 
1098   // Override
1099   virtual void ref_processor_init();
1100 





1101   void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }
1102 
1103   // Space enquiries
1104   size_t capacity() const;
1105   size_t used() const;
1106   size_t free() const;
1107   double occupancy()      { return ((double)used())/((double)capacity()); }
1108   size_t contiguous_available() const;
1109   size_t unsafe_max_alloc_nogc() const;
1110 
1111   // over-rides
1112   MemRegion used_region() const;
1113   MemRegion used_region_at_save_marks() const;
1114 
1115   // Does a "full" (forced) collection invoked on this generation collect
1116   // all younger generations as well? Note that the second conjunct is a
1117   // hack to allow the collection of the younger gen first if the flag is
1118   // set. This is better than using the policy's should_collect_gen0_first()
1119   // since that causes us to do an extra unnecessary pair of restart-&-stop-world.
1120   virtual bool full_collects_younger_generations() const {
       // Younger gens are collected together with a full collection only
       // when compacting at full collection, and not when CollectGen0First
       // asks for the young gen to be collected first (see comment above).
1121     return UseCMSCompactAtFullCollection && !CollectGen0First;
1122   }
1123 
1124   void space_iterate(SpaceClosure* blk, bool usedOnly = false);
1125 
1126   // Support for compaction
1127   CompactibleSpace* first_compaction_space() const;
1128   // Adjust quantities in the generation affected by
1129   // the compaction.
1130   void reset_after_compaction();
1131 
1132   // Allocation support
1133   HeapWord* allocate(size_t size, bool tlab);
1134   HeapWord* have_lock_and_allocate(size_t size, bool tlab);
1135   oop       promote(oop obj, size_t obj_size, oop* ref);
1136   HeapWord* par_allocate(size_t size, bool tlab) {
1137     return allocate(size, tlab);
1138   }
1139 
1140   // Incremental mode triggering.
1141   HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
1142                                      size_t word_size);
1143 
1144   // Used by CMSStats to track direct allocation.  The value is sampled and
1145   // reset after each young gen collection.
1146   size_t direct_allocated_words() const { return _direct_allocated_words; }
1147   void reset_direct_allocated_words()   { _direct_allocated_words = 0; }
1148 
1149   // Overrides for parallel promotion.
1150   virtual oop par_promote(int thread_num,
1151                           oop obj, markOop m, size_t word_sz);
1152   // This one should not be called for CMS.
1153   virtual void par_promote_alloc_undo(int thread_num,
1154                                       HeapWord* obj, size_t word_sz);
1155   virtual void par_promote_alloc_done(int thread_num);
1156   virtual void par_oop_since_save_marks_iterate_done(int thread_num);
1157 
1158   virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
1159     bool younger_handles_promotion_failure) const;
1160 
1161   bool should_collect(bool full, size_t size, bool tlab);
1162     // XXXPERM
1163   bool shouldConcurrentCollect(double initiatingOccupancy); // XXXPERM
1164   void collect(bool   full,
1165                bool   clear_all_soft_refs,
1166                size_t size,
1167                bool   tlab);
1168 
1169   HeapWord* expand_and_allocate(size_t word_size,
1170                                 bool tlab,
1171                                 bool parallel = false);
1172 
1173   // GC prologue and epilogue
1174   void gc_prologue(bool full);
1175   void gc_prologue_work(bool full, bool registerClosure,
1176                         ModUnionClosure* modUnionClosure);
1177   void gc_epilogue(bool full);
1178   void gc_epilogue_work(bool full);
1179 
1180   // Time since last GC of this generation
1181   jlong time_of_last_gc(jlong now) {
1182     return collector()->time_of_last_gc(now);
1183   }
1184   void update_time_of_last_gc(jlong now) {
1185     collector()-> update_time_of_last_gc(now);
1186   }
1187 
1188   // Allocation failure
1189   void expand(size_t bytes, size_t expand_bytes, 
1190     CMSExpansionCause::Cause cause);

1191   void shrink(size_t bytes);
1192   HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
1193   bool expand_and_ensure_spooling_space(PromotionInfo* promo);
1194 
1195   // Iteration support and related enquiries
1196   void save_marks();
1197   bool no_allocs_since_save_marks();
1198   void object_iterate_since_last_GC(ObjectClosure* cl);
1199   void younger_refs_iterate(OopsInGenClosure* cl);
1200 
1201   // Iteration support specific to CMS generations
1202   void save_sweep_limit();
1203 
1204   // More iteration support
1205   virtual void oop_iterate(MemRegion mr, OopClosure* cl);
1206   virtual void oop_iterate(OopClosure* cl);
1207   virtual void object_iterate(ObjectClosure* cl);
1208 
1209   // Need to declare the full complement of closures, whether we'll
1210   // override them or not, or get message from the compiler:


1278                                   bool use_adaptive_freelists,
1279                                   FreeBlockDictionary::DictionaryChoice 
1280                                     dictionaryChoice) :
1281     ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct,
1282       use_adaptive_freelists, dictionaryChoice) {}
1283 
1284   virtual const char* short_name() const { return "ASCMS"; }
1285   virtual Generation::Name kind() { return Generation::ASConcurrentMarkSweep; }
1286 
1287   virtual void update_counters();
1288   virtual void update_counters(size_t used);
1289 };
1290 
1291 //
1292 // Closures of various sorts used by CMS to accomplish its work
1293 //
1294 
1295 // This closure is used to check that a certain set of oops is empty.
1296 class FalseClosure: public OopClosure {
1297  public:
       // Any invocation means the oop set was not empty -- fail hard.
1298   void do_oop(oop* p) {
1299     guarantee(false, "Should be an empty set");
1300   }
1301 };
1302 
1303 // This closure is used to do concurrent marking from the roots
1304 // following the first checkpoint. 
1305 class MarkFromRootsClosure: public BitMapClosure {
1306   CMSCollector*  _collector;
       // Heap span being marked.
1307   MemRegion      _span;
       // Mark bit map driven by this closure.
1308   CMSBitMap*     _bitMap;
       // Mod-union table -- presumably consulted during marking; confirm
       // against the .cpp implementation.
1309   CMSBitMap*     _mut;
1310   CMSMarkStack*  _markStack;
       // Stack of klassKlass objects to revisit (see _revisitStack in
       // CMSCollector).
1311   CMSMarkStack*  _revisitStack;
       // Whether this closure may yield to other activity.
1312   bool           _yield;
1313   int            _skipBits;
       // Marking finger and next yield-check threshold.
1314   HeapWord*      _finger;
1315   HeapWord*      _threshold;
1316   DEBUG_ONLY(bool _verifying;)
1317 
1318  public:
1319   MarkFromRootsClosure(CMSCollector* collector, MemRegion span,
1320                        CMSBitMap* bitMap,
1321                        CMSMarkStack*  markStack,
1322                        CMSMarkStack*  revisitStack,
1323                        bool should_yield, bool verifying = false);
       // Called for each set bit in the bit map; defined out of line.
1324   void do_bit(size_t offset);
       // Restart marking at addr (used after overflow handling).
1325   void reset(HeapWord* addr);
1326   inline void do_yield_check();
1327 
1328  private:
1329   void scanOopsInOop(HeapWord* ptr);
1330   void do_yield_work();
1331 };
1332 
1333 // This closure is used to do concurrent multi-threaded
1334 // marking from the roots following the first checkpoint. 
1335 // XXX This should really be a subclass of the serial version 
1336 // above, but I have not had the time to refactor things cleanly.
1337 // That will be done for Dolphin.
1338 class Par_MarkFromRootsClosure: public BitMapClosure {
1339   CMSCollector*  _collector;
       // Entire span being collected vs. the sub-span assigned to this
       // worker.
1340   MemRegion      _whole_span;
1341   MemRegion      _span;
1342   CMSBitMap*     _bit_map;
       // Mod-union table; role presumed parallel to the serial closure's
       // _mut -- confirm in the .cpp file.
1343   CMSBitMap*     _mut;
       // Per-worker work queue, with a shared overflow stack for spill.
1344   OopTaskQueue*  _work_queue;
1345   CMSMarkStack*  _overflow_stack;
1346   CMSMarkStack*  _revisit_stack;
1347   bool           _yield;
1348   int            _skip_bits;
       // Marking finger and next yield-check threshold.
1349   HeapWord*      _finger;
1350   HeapWord*      _threshold;
       // The owning concurrent-marking task (for termination/stealing).
1351   CMSConcMarkingTask* _task;
1352  public:
1353   Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
1354                        MemRegion span,
1355                        CMSBitMap* bit_map,
1356                        OopTaskQueue* work_queue,
1357                        CMSMarkStack*  overflow_stack,
1358                        CMSMarkStack*  revisit_stack,
1359                        bool should_yield);
1360   void do_bit(size_t offset);
1361   inline void do_yield_check();
1362 
1363  private:
1364   void scan_oops_in_oop(HeapWord* ptr);
1365   void do_yield_work();
       // Refill the local work queue from the shared overflow stack;
       // returns whether any work was obtained.
1366   bool get_work_from_overflow_stack();
1367 };
1368 
1369 // The following closures are used to do certain kinds of verification of
1370 // CMS marking.
1371 class PushAndMarkVerifyClosure: public OopClosure {
1372   CMSCollector*    _collector;
1373   MemRegion        _span;
       // Bit map built during verification, compared against the CMS
       // collector's own bit map (_cms_bm).
1374   CMSBitMap*       _verification_bm;
1375   CMSBitMap*       _cms_bm;
1376   CMSMarkStack*    _mark_stack;






1377  public:
1378   PushAndMarkVerifyClosure(CMSCollector* cms_collector,
1379                            MemRegion span,
1380                            CMSBitMap* verification_bm,
1381                            CMSBitMap* cms_bm,
1382                            CMSMarkStack*  mark_stack);
1383   void do_oop(oop* p);

1384   // Deal with a stack overflow condition
1385   void handle_stack_overflow(HeapWord* lost);
1386 };
1387 
1388 class MarkFromRootsVerifyClosure: public BitMapClosure {
1389   CMSCollector*  _collector;
1390   MemRegion      _span;
       // Verification bit map being populated, checked against the CMS
       // collector's bit map.
1391   CMSBitMap*     _verification_bm;
1392   CMSBitMap*     _cms_bm;
1393   CMSMarkStack*  _mark_stack;
       // Current marking finger.
1394   HeapWord*      _finger;
       // Embedded helper used to push and mark discovered oops.
1395   PushAndMarkVerifyClosure _pam_verify_closure;
1396  public:
1397   MarkFromRootsVerifyClosure(CMSCollector* collector, MemRegion span,
1398                              CMSBitMap* verification_bm,
1399                              CMSBitMap* cms_bm,
1400                              CMSMarkStack*  mark_stack);
1401   void do_bit(size_t offset);
       // Restart verification marking at addr.
1402   void reset(HeapWord* addr);
1403 };
1404 
1405 
1406 // This closure is used to check that a certain set of bits is
1407 // "empty" (i.e. the bit vector doesn't have any 1-bits).
1408 class FalseBitMapClosure: public BitMapClosure {
1409  public:
       // Any invocation means a 1-bit was found in a bit vector that was
       // expected to be empty -- fail hard.
1410   void do_bit(size_t offset) {
1411     guarantee(false, "Should not have a 1 bit"); 

1412   }
1413 };
1414 
1415 // This closure is used during the second checkpointing phase
1416 // to rescan the marked objects on the dirty cards in the mod
1417 // union table and the card table proper. It's invoked via
1418 // MarkFromDirtyCardsClosure below. It uses either
1419 // [Par_]MarkRefsIntoAndScanClosure (Par_ in the parallel case)
1420 // declared in genOopClosures.hpp to accomplish some of its work.
1421 // In the parallel case the bitMap is shared, so access to
1422 // it needs to be suitably synchronized for updates by embedded
1423 // closures that update it; however, this closure itself only
1424 // reads the bit_map and because it is idempotent, is immune to
1425 // reading stale values.
1426 class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure {
1427   #ifdef ASSERT
1428     CMSCollector*          _collector;
1429     MemRegion              _span;
1430     union {
1431       CMSMarkStack*        _mark_stack;


1718   // Yield
1719   void do_yield_work(HeapWord* addr);
1720 
1721   // Debugging/Printing
1722   void record_free_block_coalesced(FreeChunk* fc) const PRODUCT_RETURN;
1723 
1724  public:
1725   SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
1726                CMSBitMap* bitMap, bool should_yield);
1727   ~SweepClosure();
1728 
1729   size_t       do_blk_careful(HeapWord* addr);
1730 };
1731 
1732 // Closures related to weak references processing
1733 
1734 // During CMS' weak reference processing, this is a
1735 // work-routine/closure used to complete transitive
1736 // marking of objects as live after a certain point
1737 // in which an initial set has been completely accumulated.



1738 class CMSDrainMarkingStackClosure: public VoidClosure {
1739   CMSCollector*        _collector;
1740   MemRegion            _span;
       // Mark stack to drain and the bit map recording marks.
1741   CMSMarkStack*        _mark_stack;
1742   CMSBitMap*           _bit_map;
       // Closure applied to keep referents alive while draining;
       // not owned by this object.
1743   CMSKeepAliveClosure* _keep_alive;

1744  public:
1745   CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span,
1746                       CMSBitMap* bit_map, CMSMarkStack* mark_stack,
1747                       CMSKeepAliveClosure* keep_alive):

1748     _collector(collector),
1749     _span(span),
1750     _bit_map(bit_map),
1751     _mark_stack(mark_stack),
1752     _keep_alive(keep_alive) { }




1753 
       // Drains the marking stack, completing transitive marking; defined
       // out of line.
1754   void do_void();
1755 };
1756 
1757 // A parallel version of CMSDrainMarkingStackClosure above.
1758 class CMSParDrainMarkingStackClosure: public VoidClosure {
1759   CMSCollector*           _collector;
1760   MemRegion               _span;
1761   OopTaskQueue*           _work_queue;
1762   CMSBitMap*              _bit_map;
1763   CMSInnerParMarkAndPushClosure _mark_and_push;
1764 
1765  public:
1766   CMSParDrainMarkingStackClosure(CMSCollector* collector,
1767                                  MemRegion span, CMSBitMap* bit_map,
1768                                  OopTaskQueue* work_queue):
1769     _collector(collector),
1770     _span(span),
1771     _bit_map(bit_map),
1772     _work_queue(work_queue),


   1 #ifdef USE_PRAGMA_IDENT_HDR
   2 #pragma ident "@(#)concurrentMarkSweepGeneration.hpp    1.163 08/09/25 13:47:54 JVM"
   3 #endif
   4 /*
   5  * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
   6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   7  *
   8  * This code is free software; you can redistribute it and/or modify it
   9  * under the terms of the GNU General Public License version 2 only, as
  10  * published by the Free Software Foundation.
  11  *
  12  * This code is distributed in the hope that it will be useful, but WITHOUT
  13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  15  * version 2 for more details (a copy is included in the LICENSE file that
  16  * accompanied this code).
  17  *
  18  * You should have received a copy of the GNU General Public License version
  19  * 2 along with this work; if not, write to the Free Software Foundation,
  20  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  21  *
  22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  23  * CA 95054 USA or visit www.sun.com if you need additional information or
  24  * have any questions.
  25  *  


 436 // ReferenceProcessor class.
 437 // For objects in the CMS generation, this closure checks
 438 // if the object is "live" (reachable). Used in weak
 439 // reference processing.
 440 class CMSIsAliveClosure: public BoolObjectClosure {
       // Heap span this closure applies to; presumably objects are tested
       // for membership in this span by do_object_b() -- body not visible
       // here, confirm in the .cpp file.
 441   const MemRegion  _span;
       // Bit map consulted for liveness; kept const -- this closure only
       // reads it.
 442   const CMSBitMap* _bit_map;
 443 
 444   friend class CMSCollector;
 445  public:
       // Constructor requires a non-empty span; an empty span would make
       // any containment-based liveness test vacuous.
 446   CMSIsAliveClosure(MemRegion span,
 447                     CMSBitMap* bit_map):
 448     _span(span),
 449     _bit_map(bit_map) {
 450     assert(!span.is_empty(), "Empty span could spell trouble");
 451   }
 452 
       // Deliberately unusable: this closure is only meant to be queried
       // via do_object_b(), never iterated with do_object().
 453   void do_object(oop obj) {
 454     assert(false, "not to be invoked");
 455   }
 456 
       // Returns whether obj is considered live; defined out of line.
 457   bool do_object_b(oop obj);
 458 };
 459 
 460 
 461 // Implements AbstractRefProcTaskExecutor for CMS.
 462 class CMSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
 463 public:
 464 
       // Retains a reference to the collector; lifetime of the executor
       // must not exceed that of the collector it wraps.
 465   CMSRefProcTaskExecutor(CMSCollector& collector)
 466     : _collector(collector)
 467   { }
 468   
 469   // Executes a task using worker threads.  
 470   virtual void execute(ProcessTask& task);
       // Same, for the reference-enqueueing phase of ref processing.
 471   virtual void execute(EnqueueTask& task);
 472 private:
 473   CMSCollector& _collector;
 474 };
 475 
 476 


 520   // The following array-pair keeps track of mark words
 521   // displaced for accommodating overflow list above.
 522   // This code will likely be revisited under RFE#4922830.
 523   GrowableArray<oop>*     _preserved_oop_stack; 
 524   GrowableArray<markOop>* _preserved_mark_stack; 
 525 
 526   int*             _hash_seed;
 527 
 528   // In support of multi-threaded concurrent phases
 529   YieldingFlexibleWorkGang* _conc_workers;
 530 
 531   // Performance Counters
 532   CollectorCounters* _gc_counters;
 533 
 534   // Initialization Errors
 535   bool _completed_initialization;
 536 
 537   // In support of ExplicitGCInvokesConcurrent
 538   static   bool _full_gc_requested;
 539   unsigned int  _collection_count_start;
 540 
 541   // Should we unload classes this concurrent cycle?
 542   bool _should_unload_classes;
 543   unsigned int  _concurrent_cycles_since_last_unload;
       // Number of concurrent cycles completed since classes were last
       // unloaded (simple accessor).
  544   unsigned int concurrent_cycles_since_last_unload() const {
  545     return _concurrent_cycles_since_last_unload;
  546   }
 547   // Did we (allow) unload classes in the previous concurrent cycle?
  548   bool unloaded_classes_last_cycle() const {
        // Classes were unloaded in the previous cycle iff the
        // cycles-since-last-unload counter has not yet advanced past zero.
  549     return concurrent_cycles_since_last_unload() == 0;
  550   }
 551 
 552   // Verification support
 553   CMSBitMap     _verification_mark_bm;
 554   void verify_after_remark_work_1();
 555   void verify_after_remark_work_2();
 556 
 557   // true if any verification flag is on.
 558   bool _verifying;
 559   bool verifying() const { return _verifying; }
 560   void set_verifying(bool v) { _verifying = v; }
 561 
 562   // Collector policy
 563   ConcurrentMarkSweepPolicy* _collector_policy;
 564   ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }
 565 
 566   // Check whether the gc time limit has been 
 567   // exceeded and set the size policy flag
 568   // appropriately.
 569   void check_gc_time_limit();


 578   CardTableRS*                   _ct;      // card table
 579 
 580   // CMS marking support structures
 581   CMSBitMap     _markBitMap;
 582   CMSBitMap     _modUnionTable;
 583   CMSMarkStack  _markStack;
 584   CMSMarkStack  _revisitStack;            // used to keep track of klassKlass objects
 585                                           // to revisit
 586   CMSBitMap     _perm_gen_verify_bit_map; // Mark bit map for perm gen verification support.
 587 
 588   HeapWord*     _restart_addr; // in support of marking stack overflow
       // NOTE(review): from the name, lowers _restart_addr to "low" when a stack
       // overflow forces marking to be restarted — confirm in the .cpp.
 589   void          lower_restart_addr(HeapWord* low);
 590 
 591   // Counters in support of marking stack / work queue overflow handling:
 592   // a non-zero value indicates certain types of overflow events during
 593   // the current CMS cycle and could lead to stack resizing efforts at
 594   // an opportune future time.
 595   size_t        _ser_pmc_preclean_ovflw;
 596   size_t        _ser_pmc_remark_ovflw;
 597   size_t        _par_pmc_remark_ovflw;
 598   size_t        _ser_kac_preclean_ovflw;
 599   size_t        _ser_kac_ovflw;
 600   size_t        _par_kac_ovflw;
 601   NOT_PRODUCT(size_t _num_par_pushes;)
 602 
 603   // ("Weak") Reference processing support
 604   ReferenceProcessor*            _ref_processor;
 605   CMSIsAliveClosure              _is_alive_closure;
 606       // keep this textually after _markBitMap and _span; c'tor dependency
 607 
 608   ConcurrentMarkSweepThread*     _cmsThread;   // the thread doing the work
       // Serial and parallel closures over the mod union table (see ModUnionClosure).
 609   ModUnionClosure    _modUnionClosure;
 610   ModUnionClosurePar _modUnionClosurePar;
 611 
 612   // CMS abstract state machine
 613   // initial_state: Idling
 614   // next_state(Idling)            = {Marking}
 615   // next_state(Marking)           = {Precleaning, Sweeping}
 616   // next_state(Precleaning)       = {AbortablePreclean, FinalMarking}
 617   // next_state(AbortablePreclean) = {FinalMarking}
 618   // next_state(FinalMarking)      = {Sweeping}


 640   bool _between_prologue_and_epilogue;
 641 
 642   // Signalling/State related to coordination between fore- and background GC
 643   // Note: When the baton has been passed from background GC to foreground GC,
 644   // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
 645   static bool _foregroundGCIsActive;    // true iff foreground collector is active or
 646                                  // wants to go active
 647   static bool _foregroundGCShouldWait;  // true iff background GC is active and has not
 648                                  // yet passed the baton to the foreground GC
 649 
 650   // Support for CMSScheduleRemark (abortable preclean)
 651   bool _abort_preclean;
 652   bool _start_sampling;
 653 
 654   int    _numYields;
 655   size_t _numDirtyCards;
 656   uint   _sweepCount;
 657   // number of full gc's since the last concurrent gc.
 658   uint   _full_gcs_since_conc_gc;
 659 


 660   // occupancy used for bootstrapping stats
 661   double _bootstrap_occupancy;
 662 
 663   // timer
 664   elapsedTimer _timer;
 665 
 666   // Timing, allocation and promotion statistics, used for scheduling.
 667   CMSStats      _stats;
 668 
 669   // Allocation limits installed in the young gen, used only in
 670   // CMSIncrementalMode.  When an allocation in the young gen would cross one of
 671   // these limits, the cms generation is notified and the cms thread is started
 672   // or stopped, respectively.
 673   HeapWord*     _icms_start_limit;
 674   HeapWord*     _icms_stop_limit;
 675 
       // Discriminates the CMS checkpoint VM operations: the initial and the
       // final roots checkpoints.
 676   enum CMS_op_type {
 677     CMS_op_checkpointRootsInitial,
 678     CMS_op_checkpointRootsFinal
 679   };


 812   // allocation limits in the young gen.
 813   void icms_update_allocation_limits();
 814 
 815   size_t block_size_using_printezis_bits(HeapWord* addr) const;
 816   size_t block_size_if_printezis_bits(HeapWord* addr) const;
 817   HeapWord* next_card_start_after_block(HeapWord* addr) const;
 818 
 819   void setup_cms_unloading_and_verification_state();
 820  public:
       // Construct a collector over the given CMS and perm generations,
       // using the supplied card table and collector policy.
 821   CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
 822                ConcurrentMarkSweepGeneration* permGen,
 823                CardTableRS*                   ct,
 824                ConcurrentMarkSweepPolicy*     cp);
 825   ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }
 826 
 827   ReferenceProcessor* ref_processor() { return _ref_processor; }
 828   void ref_processor_init();
 829 
 830   Mutex* bitMapLock()        const { return _markBitMap.lock();    }
 831   static CollectorState abstract_state() { return _collectorState;  }

 832 
 833   bool should_abort_preclean() const; // Whether preclean should be aborted.
 834   size_t get_eden_used() const;
 835   size_t get_eden_capacity() const;
 836 
 837   ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }
 838 
 839   // locking checks
 840   NOT_PRODUCT(static bool have_cms_token();)
 841 
 842   // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
 843   bool shouldConcurrentCollect();
 844 
 845   void collect(bool   full,
 846                bool   clear_all_soft_refs,
 847                size_t size,
 848                bool   tlab);
 849   void collect_in_background(bool clear_all_soft_refs);
 850   void collect_in_foreground(bool clear_all_soft_refs);
 851 
 852   // In support of ExplicitGCInvokesConcurrent
 853   static void request_full_gc(unsigned int full_gc_count);
 854   // Should we unload classes in a particular concurrent cycle?
 855   bool should_unload_classes() const {
 856     return _should_unload_classes;


 857   }
 858   bool update_should_unload_classes();
 859 
 860   void direct_allocated(HeapWord* start, size_t size);
 861 
 862   // Object is dead if not marked and current phase is sweeping.
 863   bool is_dead_obj(oop obj) const;
 864 
 865   // After a promotion (of "start"), do any necessary marking.
 866   // If "par", then it's being done by a parallel GC thread.
 867   // The last two args indicate if we need precise marking
 868   // and if so the size of the object so it can be dirtied
 869   // in its entirety.
 870   void promoted(bool par, HeapWord* start,
 871                 bool is_obj_array, size_t obj_size);
 872 
 873   HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
 874                                      size_t word_size);
 875 
       // Acquire / release / query the free list locks; see the .cpp for the
       // exact set of locks covered.
 876   void getFreelistLocks() const;
 877   void releaseFreelistLocks() const;
 878   bool haveFreelistLocks() const;


1007   // Non-product stat counters
1008   NOT_PRODUCT(
1009     int _numObjectsPromoted;
1010     int _numWordsPromoted;
1011     int _numObjectsAllocated;
1012     int _numWordsAllocated;
1013   )
1014 
1015   // Used for sizing decisions
1016   bool _incremental_collection_failed;
1017   bool incremental_collection_failed() {
1018     return _incremental_collection_failed;
1019   }
1020   void set_incremental_collection_failed() {
1021     _incremental_collection_failed = true;
1022   }
1023   void clear_incremental_collection_failed() {
1024     _incremental_collection_failed = false;
1025   }
1026 
1027   // accessors for the cause of the most recent expansion
1028   void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
1029   CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; }
1030 
1031  private:
1032   // For parallel young-gen GC support.
1033   CMSParGCThreadState** _par_gc_thread_states;
1034 
1035   // Reason generation was expanded
1036   CMSExpansionCause::Cause _expansion_cause;
1037 




1038   // In support of MinChunkSize being larger than min object size
1039   const double _dilatation_factor;
1040 
1041   enum CollectionTypes {
1042     Concurrent_collection_type          = 0,
1043     MS_foreground_collection_type       = 1,
1044     MSC_foreground_collection_type      = 2,
1045     Unknown_collection_type             = 3
1046   };
1047 
       // NOTE(review): from the name, records the kind of the most recent
       // collection for debugging — confirm in the .cpp.
1048   CollectionTypes _debug_collection_type;
1049 
1050   // Fraction of current occupancy at which to start a CMS collection which
1051   // will collect this generation (at least).
1052   double _initiating_occupancy;
1053 
1054  protected:




1055   // Shrink generation by specified size (returns false if unable to shrink)
1056   virtual void shrink_by(size_t bytes);
1057 
1058   // Update statistics for GC
1059   virtual void update_gc_stats(int level, bool full);
1060 
1061   // Maximum available space in the generation (including uncommitted)
1062   // space.
1063   size_t max_available() const;
1064 
1065   // getter and initializer for _initiating_occupancy field.
1066   double initiating_occupancy() const { return _initiating_occupancy; }
1067   void   init_initiating_occupancy(intx io, intx tr);
1068 
1069  public:
1070   ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
1071                                 int level, CardTableRS* ct,
1072                                 bool use_adaptive_freelists,
1073                                 FreeBlockDictionary::DictionaryChoice);
1074 
1075   // Accessors
1076   CMSCollector* collector() const { return _collector; }
1077   static void set_collector(CMSCollector* collector) {
1078     assert(_collector == NULL, "already set");
1079     _collector = collector;
1080   }
1081   CompactibleFreeListSpace*  cmsSpace() const { return _cmsSpace;  }
1082   
1083   Mutex* freelistLock() const;
1084 
1085   virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }
1086 
1087   // Adaptive size policy
1088   CMSAdaptiveSizePolicy* size_policy();
1089 
1090   bool refs_discovery_is_atomic() const { return false; }
1091   bool refs_discovery_is_mt()     const {
1092     // Note: CMS does MT-discovery during the parallel-remark
1093     // phases. Use ReferenceProcessorMTMutator to make refs
1094     // discovery MT-safe during such phases or other parallel
1095     // discovery phases in the future. This may all go away
1096     // if/when we decide that refs discovery is sufficiently
1097     // rare that the cost of the CAS's involved is in the
1098     // noise. That's a measurement that should be done, and
1099     // the code simplified if that turns out to be the case.
1100     return false;
1101   }
1102 
1103   // Override
1104   virtual void ref_processor_init();
1105 
1106   // Grow generation by specified size (returns false if unable to grow)
1107   bool grow_by(size_t bytes);
1108   // Grow generation to reserved size.
1109   bool grow_to_reserved();
1110 
1111   void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }
1112 
1113   // Space enquiries
1114   size_t capacity() const;
1115   size_t used() const;
1116   size_t free() const;
1117   double occupancy() const { return ((double)used())/((double)capacity()); }
1118   size_t contiguous_available() const;
1119   size_t unsafe_max_alloc_nogc() const;
1120 
1121   // over-rides
1122   MemRegion used_region() const;
1123   MemRegion used_region_at_save_marks() const;
1124 
1125   // Does a "full" (forced) collection invoked on this generation collect
1126   // all younger generations as well? Note that the second conjunct is a
1127   // hack to allow the collection of the younger gen first if the flag is
1128   // set. This is better than using the policy's should_collect_gen0_first()
1129   // since that causes us to do an extra unnecessary pair of restart-&-stop-world.
1130   virtual bool full_collects_younger_generations() const {
1131     return UseCMSCompactAtFullCollection && !CollectGen0First;
1132   }
1133 
1134   void space_iterate(SpaceClosure* blk, bool usedOnly = false);
1135 
1136   // Support for compaction
1137   CompactibleSpace* first_compaction_space() const;
1138   // Adjust quantities in the generation affected by
1139   // the compaction.
1140   void reset_after_compaction();
1141 
1142   // Allocation support
1143   HeapWord* allocate(size_t size, bool tlab);
1144   HeapWord* have_lock_and_allocate(size_t size, bool tlab);
1145   oop       promote(oop obj, size_t obj_size);
1146   HeapWord* par_allocate(size_t size, bool tlab) {
1147     return allocate(size, tlab);
1148   }
1149 
1150   // Incremental mode triggering.
1151   HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
1152                                      size_t word_size);
1153 
1154   // Used by CMSStats to track direct allocation.  The value is sampled and
1155   // reset after each young gen collection.
1156   size_t direct_allocated_words() const { return _direct_allocated_words; }
1157   void reset_direct_allocated_words()   { _direct_allocated_words = 0; }
1158 
1159   // Overrides for parallel promotion.
1160   virtual oop par_promote(int thread_num,
1161                           oop obj, markOop m, size_t word_sz);
1162   // This one should not be called for CMS.
1163   virtual void par_promote_alloc_undo(int thread_num,
1164                                       HeapWord* obj, size_t word_sz);
1165   virtual void par_promote_alloc_done(int thread_num);
1166   virtual void par_oop_since_save_marks_iterate_done(int thread_num);
1167 
1168   virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
1169     bool younger_handles_promotion_failure) const;
1170 
1171   bool should_collect(bool full, size_t size, bool tlab);
1172   virtual bool should_concurrent_collect() const;
1173   virtual bool is_too_full() const;
1174   void collect(bool   full,
1175                bool   clear_all_soft_refs,
1176                size_t size,
1177                bool   tlab);
1178 
1179   HeapWord* expand_and_allocate(size_t word_size,
1180                                 bool tlab,
1181                                 bool parallel = false);
1182 
1183   // GC prologue and epilogue
1184   void gc_prologue(bool full);
1185   void gc_prologue_work(bool full, bool registerClosure,
1186                         ModUnionClosure* modUnionClosure);
1187   void gc_epilogue(bool full);
1188   void gc_epilogue_work(bool full);
1189 
1190   // Time since last GC of this generation
1191   jlong time_of_last_gc(jlong now) {
1192     return collector()->time_of_last_gc(now);
1193   }
1194   void update_time_of_last_gc(jlong now) {
1195     collector()-> update_time_of_last_gc(now);
1196   }
1197 
1198   // Allocation failure
1199   void expand(size_t bytes, size_t expand_bytes, 
1200     CMSExpansionCause::Cause cause);
1201   virtual bool expand(size_t bytes, size_t expand_bytes);
1202   void shrink(size_t bytes);
1203   HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
1204   bool expand_and_ensure_spooling_space(PromotionInfo* promo);
1205 
1206   // Iteration support and related enquiries
1207   void save_marks();
1208   bool no_allocs_since_save_marks();
1209   void object_iterate_since_last_GC(ObjectClosure* cl);
1210   void younger_refs_iterate(OopsInGenClosure* cl);
1211 
1212   // Iteration support specific to CMS generations
1213   void save_sweep_limit();
1214 
1215   // More iteration support
1216   virtual void oop_iterate(MemRegion mr, OopClosure* cl);
1217   virtual void oop_iterate(OopClosure* cl);
1218   virtual void object_iterate(ObjectClosure* cl);
1219 
1220   // Need to declare the full complement of closures, whether we'll
1221   // override them or not, or get message from the compiler:


1289                                   bool use_adaptive_freelists,
1290                                   FreeBlockDictionary::DictionaryChoice 
1291                                     dictionaryChoice) :
1292     ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct,
1293       use_adaptive_freelists, dictionaryChoice) {}
1294 
1295   virtual const char* short_name() const { return "ASCMS"; }
1296   virtual Generation::Name kind() { return Generation::ASConcurrentMarkSweep; }
1297 
       // Performance-counter updates for the adaptive-size ("AS") variant.
1298   virtual void update_counters();
1299   virtual void update_counters(size_t used);
1300 };
1301 
1302 //
1303 // Closures of various sorts used by CMS to accomplish its work
1304 //
1305 
1306 // This closure is used to check that a certain set of oops is empty.
1307 class FalseClosure: public OopClosure {
1308  public:
       // Neither overload should ever be reached; both fail with a guarantee().
1309   void do_oop(oop* p)       { guarantee(false, "Should be an empty set"); }
1310   void do_oop(narrowOop* p) { guarantee(false, "Should be an empty set"); }

1311 };
1312 
1313 // This closure is used to do concurrent marking from the roots
1314 // following the first checkpoint. 
1315 class MarkFromRootsClosure: public BitMapClosure {
1316   CMSCollector*  _collector;
1317   MemRegion      _span;
1318   CMSBitMap*     _bitMap;
1319   CMSBitMap*     _mut;
1320   CMSMarkStack*  _markStack;
1321   CMSMarkStack*  _revisitStack;
       // NOTE(review): if true, do_yield_check() may yield via do_yield_work();
       // semantics are defined in the .cpp — confirm there.
1322   bool           _yield;
1323   int            _skipBits;
       // NOTE(review): _finger/_threshold track marking progress; exact
       // semantics are defined in the .cpp — confirm there.
1324   HeapWord*      _finger;
1325   HeapWord*      _threshold;
1326   DEBUG_ONLY(bool _verifying;)
1327 
1328  public:
1329   MarkFromRootsClosure(CMSCollector* collector, MemRegion span,
1330                        CMSBitMap* bitMap,
1331                        CMSMarkStack*  markStack,
1332                        CMSMarkStack*  revisitStack,
1333                        bool should_yield, bool verifying = false);
1334   bool do_bit(size_t offset);
1335   void reset(HeapWord* addr);
1336   inline void do_yield_check();
1337 
1338  private:
1339   void scanOopsInOop(HeapWord* ptr);
1340   void do_yield_work();
1341 };
1342 
1343 // This closure is used to do concurrent multi-threaded
1344 // marking from the roots following the first checkpoint. 
1345 // XXX This should really be a subclass of The serial version
1346 // above, but i have not had the time to refactor things cleanly.
1347 // That will be done for Dolphin.
1348 class Par_MarkFromRootsClosure: public BitMapClosure {
1349   CMSCollector*  _collector;
1350   MemRegion      _whole_span;
1351   MemRegion      _span;
1352   CMSBitMap*     _bit_map;
1353   CMSBitMap*     _mut;
1354   OopTaskQueue*  _work_queue;
1355   CMSMarkStack*  _overflow_stack;
1356   CMSMarkStack*  _revisit_stack;
1357   bool           _yield;
1358   int            _skip_bits;
1359   HeapWord*      _finger;
1360   HeapWord*      _threshold;
1361   CMSConcMarkingTask* _task;
1362  public:
1363   Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
1364                        MemRegion span,
1365                        CMSBitMap* bit_map,
1366                        OopTaskQueue* work_queue,
1367                        CMSMarkStack*  overflow_stack,
1368                        CMSMarkStack*  revisit_stack,
1369                        bool should_yield);
1370   bool do_bit(size_t offset);
1371   inline void do_yield_check();
1372 
1373  private:
1374   void scan_oops_in_oop(HeapWord* ptr);
1375   void do_yield_work();
1376   bool get_work_from_overflow_stack();
1377 };
1378 
1379 // The following closures are used to do certain kinds of verification of
1380 // CMS marking.
1381 class PushAndMarkVerifyClosure: public OopClosure {
1382   CMSCollector*    _collector;
1383   MemRegion        _span;
1384   CMSBitMap*       _verification_bm;
1385   CMSBitMap*       _cms_bm;
1386   CMSMarkStack*    _mark_stack;
1387  protected:
1388   void do_oop(oop p);
       // Decode the (non-NULL) heap oop at p and hand it to do_oop(oop) above.
1389   template <class T> inline void do_oop_work(T *p) {
1390     oop obj = oopDesc::load_decode_heap_oop_not_null(p);
1391     do_oop(obj);
1392   }
1393  public:
1394   PushAndMarkVerifyClosure(CMSCollector* cms_collector,
1395                            MemRegion span,
1396                            CMSBitMap* verification_bm,
1397                            CMSBitMap* cms_bm,
1398                            CMSMarkStack*  mark_stack);
1399   void do_oop(oop* p);
1400   void do_oop(narrowOop* p);
1401   // Deal with a stack overflow condition
1402   void handle_stack_overflow(HeapWord* lost);
1403 };
1404 
1405 class MarkFromRootsVerifyClosure: public BitMapClosure {
1406   CMSCollector*  _collector;
1407   MemRegion      _span;
1408   CMSBitMap*     _verification_bm;
1409   CMSBitMap*     _cms_bm;
1410   CMSMarkStack*  _mark_stack;
1411   HeapWord*      _finger;
       // Applied to oops discovered while verification marking proceeds.
1412   PushAndMarkVerifyClosure _pam_verify_closure;
1413  public:
1414   MarkFromRootsVerifyClosure(CMSCollector* collector, MemRegion span,
1415                              CMSBitMap* verification_bm,
1416                              CMSBitMap* cms_bm,
1417                              CMSMarkStack*  mark_stack);
1418   bool do_bit(size_t offset);
1419   void reset(HeapWord* addr);
1420 };
1421 
1422 
1423 // This closure is used to check that a certain set of bits is
1424 // "empty" (i.e. the bit vector doesn't have any 1-bits).
1425 class FalseBitMapClosure: public BitMapClosure {
1426  public:
       // Any set bit is an error: fail immediately with a guarantee().
1427   bool do_bit(size_t offset) {
1428     guarantee(false, "Should not have a 1 bit");
1429     return true;
1430   }
1431 };
1432 
1433 // This closure is used during the second checkpointing phase
1434 // to rescan the marked objects on the dirty cards in the mod
1435 // union table and the card table proper. It's invoked via
1436 // MarkFromDirtyCardsClosure below. It uses either
1437 // [Par_]MarkRefsIntoAndScanClosure (Par_ in the parallel case)
1438 // declared in genOopClosures.hpp to accomplish some of its work.
1439 // In the parallel case the bitMap is shared, so access to
1440 // it needs to be suitably synchronized for updates by embedded
1441 // closures that update it; however, this closure itself only
1442 // reads the bit_map and because it is idempotent, is immune to
1443 // reading stale values.
1444 class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure {
1445   #ifdef ASSERT
1446     CMSCollector*          _collector;
1447     MemRegion              _span;
1448     union {
1449       CMSMarkStack*        _mark_stack;


1736   // Yield
1737   void do_yield_work(HeapWord* addr);
1738 
1739   // Debugging/Printing
1740   void record_free_block_coalesced(FreeChunk* fc) const PRODUCT_RETURN;
1741 
1742  public:
1743   SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
1744                CMSBitMap* bitMap, bool should_yield);
1745   ~SweepClosure();
1746 
       // NOTE(review): from the name, processes the block at addr and returns
       // its size; the "careful" sweeping invariants live in the .cpp — confirm.
1747   size_t       do_blk_careful(HeapWord* addr);
1748 };
1749 
1750 // Closures related to weak references processing
1751 
1752 // During CMS' weak reference processing, this is a
1753 // work-routine/closure used to complete transitive
1754 // marking of objects as live after a certain point
1755 // in which an initial set has been completely accumulated.
1756 // This closure is currently used both during the final
1757 // remark stop-world phase, as well as during the concurrent
1758 // precleaning of the discovered reference lists.
1759 class CMSDrainMarkingStackClosure: public VoidClosure {
1760   CMSCollector*        _collector;
1761   MemRegion            _span;
1762   CMSMarkStack*        _mark_stack;
1763   CMSBitMap*           _bit_map;
1764   CMSKeepAliveClosure* _keep_alive;
1765   bool                 _concurrent_precleaning;
1766  public:
1767   CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span,
1768                       CMSBitMap* bit_map, CMSMarkStack* mark_stack,
1769                       CMSKeepAliveClosure* keep_alive,
1770                       bool cpc):
1771     _collector(collector),
1772     _span(span),
1773     _bit_map(bit_map),
1774     _mark_stack(mark_stack),
1775     _keep_alive(keep_alive),
1776     _concurrent_precleaning(cpc) {
       // The cpc flag must agree with the keep-alive closure's own notion of
       // whether we are in concurrent precleaning.
1777     assert(_concurrent_precleaning == _keep_alive->concurrent_precleaning(),
1778            "Mismatch");
1779   }
1780 
1781   void do_void();
1782 };
1783 
1784 // A parallel version of CMSDrainMarkingStackClosure above.
1785 class CMSParDrainMarkingStackClosure: public VoidClosure {
1786   CMSCollector*           _collector;
1787   MemRegion               _span;
1788   OopTaskQueue*           _work_queue;
1789   CMSBitMap*              _bit_map;
1790   CMSInnerParMarkAndPushClosure _mark_and_push;
1791 
1792  public:
1793   CMSParDrainMarkingStackClosure(CMSCollector* collector,
1794                                  MemRegion span, CMSBitMap* bit_map,
1795                                  OopTaskQueue* work_queue):
1796     _collector(collector),
1797     _span(span),
1798     _bit_map(bit_map),
1799     _work_queue(work_queue),