src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp

Old version:

 275 //
 276 void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, intx tr) {
 277   assert(io <= 100 && tr >= 0 && tr <= 100, "Check the arguments");
 278   if (io >= 0) {
 279     _initiating_occupancy = (double)io / 100.0;
 280   } else {
 281     _initiating_occupancy = ((100 - MinHeapFreeRatio) +
 282                              (double)(tr * MinHeapFreeRatio) / 100.0)
 283                             / 100.0;
 284   }
 285 }
 286 
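With the usual flag defaults (MinHeapFreeRatio = 40, and CMSTriggerRatio = 80 passed in as tr by the caller; treat those values as assumptions about the configuration), the io < 0 branch gives ((100 - 40) + 80 * 40 / 100) / 100 = (60 + 32) / 100 = 0.92, i.e. a CMS cycle initiates at roughly 92% occupancy. A minimal standalone sketch of the same arithmetic:

#include <cstdio>

// Minimal sketch of init_initiating_occupancy() outside the VM.
// Assumed flag defaults: MinHeapFreeRatio = 40, CMSTriggerRatio = 80.
static double initiating_occupancy(long io, long tr, long min_heap_free_ratio) {
  if (io >= 0) {
    return (double)io / 100.0;   // explicit occupancy fraction
  }
  // Derived case: start from the occupancy implied by MinHeapFreeRatio,
  // then add back tr percent of the MinHeapFreeRatio headroom.
  return ((100 - min_heap_free_ratio) +
          (double)(tr * min_heap_free_ratio) / 100.0) / 100.0;
}

int main() {
  printf("%.2f\n", initiating_occupancy(-1, 80, 40));  // prints 0.92
  return 0;
}
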
 287 void ConcurrentMarkSweepGeneration::ref_processor_init() {
 288   assert(collector() != NULL, "no collector");
 289   collector()->ref_processor_init();
 290 }
 291 
 292 void CMSCollector::ref_processor_init() {
 293   if (_ref_processor == NULL) {
 294     // Allocate and initialize a reference processor
 295     _ref_processor = ReferenceProcessor::create_ref_processor(
 296         _span,                               // span
 297         _cmsGen->refs_discovery_is_atomic(), // atomic_discovery
 298         _cmsGen->refs_discovery_is_mt(),     // mt_discovery
 299         &_is_alive_closure,
 300         ParallelGCThreads,
 301         ParallelRefProcEnabled);
 302     // Initialize the _ref_processor field of CMSGen
 303     _cmsGen->set_ref_processor(_ref_processor);
 304 
 305     // Allocate a dummy ref processor for perm gen.
 306     ReferenceProcessor* rp2 = new ReferenceProcessor();
 307     if (rp2 == NULL) {
 308       vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
 309     }
 310     _permGen->set_ref_processor(rp2);
 311   }
 312 }
 313 
 314 CMSAdaptiveSizePolicy* CMSCollector::size_policy() {
 315   GenCollectedHeap* gch = GenCollectedHeap::heap();
 316   assert(gch->kind() == CollectedHeap::GenCollectedHeap,
 317     "Wrong type of heap");
 318   CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
 319     gch->gen_policy()->size_policy();
 320   assert(sp->is_gc_cms_adaptive_size_policy(),
 321     "Wrong type of size policy");


5686   verify_overflow_empty();
5687 }
5688 
5689 ////////////////////////////////////////////////////////
5690 // Parallel Reference Processing Task Proxy Class
5691 ////////////////////////////////////////////////////////
5692 class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
5693   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5694   CMSCollector*          _collector;
5695   CMSBitMap*             _mark_bit_map;
5696   const MemRegion        _span;
5697   ProcessTask&           _task;
5698 
5699 public:
5700   CMSRefProcTaskProxy(ProcessTask&     task,
5701                       CMSCollector*    collector,
5702                       const MemRegion& span,
5703                       CMSBitMap*       mark_bit_map,
5704                       AbstractWorkGang* workers,
5705                       OopTaskQueueSet* task_queues):
5706     AbstractGangTaskWOopQueues("Process referents by policy in parallel",
5707       task_queues),
5708     _task(task),
5709     _collector(collector), _span(span), _mark_bit_map(mark_bit_map)
5710     {
5711       assert(_collector->_span.equals(_span) && !_span.is_empty(),
5712              "Inconsistency in _span");
5713     }
5714 
5715   OopTaskQueueSet* task_queues() { return queues(); }
5716 
5717   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5718 
5719   void do_work_steal(int i,
5720                      CMSParDrainMarkingStackClosure* drain,
5721                      CMSParKeepAliveClosure* keep_alive,
5722                      int* seed);
5723 
5724   virtual void work(int i);
5725 };
5726 
5727 void CMSRefProcTaskProxy::work(int i) {
5728   assert(_collector->_span.equals(_span), "Inconsistency in _span");
5729   CMSParKeepAliveClosure par_keep_alive(_collector, _span,
5730                                         _mark_bit_map,
5731                                         &_collector->_revisitStack,
5732                                         work_queue(i));

New version:

 275 //
 276 void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, intx tr) {
 277   assert(io <= 100 && tr >= 0 && tr <= 100, "Check the arguments");
 278   if (io >= 0) {
 279     _initiating_occupancy = (double)io / 100.0;
 280   } else {
 281     _initiating_occupancy = ((100 - MinHeapFreeRatio) +
 282                              (double)(tr * MinHeapFreeRatio) / 100.0)
 283                             / 100.0;
 284   }
 285 }
 286 
 287 void ConcurrentMarkSweepGeneration::ref_processor_init() {
 288   assert(collector() != NULL, "no collector");
 289   collector()->ref_processor_init();
 290 }
 291 
 292 void CMSCollector::ref_processor_init() {
 293   if (_ref_processor == NULL) {
 294     // Allocate and initialize a reference processor
 295     _ref_processor =
 296       new ReferenceProcessor(_span,                               // span
 297                              (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
 298                              ParallelGCThreads,                   // mt processing degree
 299                              _cmsGen->refs_discovery_is_mt(),     // mt discovery
 300                              ConcGCThreads,                       // mt discovery degree
 301                              _cmsGen->refs_discovery_is_atomic(), // atomic discovery (false for CMS)
 302                              &_is_alive_closure,                  // closure for liveness info
 303                              false);                              // next field updates do not need write barrier
 304     // Initialize the _ref_processor field of CMSGen
 305     _cmsGen->set_ref_processor(_ref_processor);
 306 
 307     // Allocate a dummy ref processor for perm gen.
 308     ReferenceProcessor* rp2 = new ReferenceProcessor();
 309     if (rp2 == NULL) {
 310       vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
 311     }
 312     _permGen->set_ref_processor(rp2);
 313   }
 314 }
 315 
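The rewrite drops the old ReferenceProcessor::create_ref_processor() factory in favor of a direct constructor call with explicit multi-threading degrees for processing and discovery. For orientation, the constructor this call resolves to presumably looks something like the sketch below (parameter names and defaults are assumptions, not verbatim from referenceProcessor.hpp):

// Assumed shape of the constructor being matched above; a sketch only.
ReferenceProcessor(MemRegion span,
                   bool mt_processing = false,
                   int  mt_processing_degree = 1,
                   bool mt_discovery = false,
                   int  mt_discovery_degree = 1,
                   bool atomic_discovery = true,
                   BoolObjectClosure* is_alive_non_header = NULL,
                   bool discovered_list_needs_barrier = false);

Note how mt processing is enabled only when both ParallelGCThreads > 1 and ParallelRefProcEnabled hold, while mt discovery follows the generation's own refs_discovery_is_mt().
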
 316 CMSAdaptiveSizePolicy* CMSCollector::size_policy() {
 317   GenCollectedHeap* gch = GenCollectedHeap::heap();
 318   assert(gch->kind() == CollectedHeap::GenCollectedHeap,
 319     "Wrong type of heap");
 320   CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
 321     gch->gen_policy()->size_policy();
 322   assert(sp->is_gc_cms_adaptive_size_policy(),
 323     "Wrong type of size policy");
5688   verify_overflow_empty();
5689 }
5690 
5691 ////////////////////////////////////////////////////////
5692 // Parallel Reference Processing Task Proxy Class
5693 ////////////////////////////////////////////////////////
5694 class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
5695   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5696   CMSCollector*          _collector;
5697   CMSBitMap*             _mark_bit_map;
5698   const MemRegion        _span;
5699   ProcessTask&           _task;
5700 
5701 public:
5702   CMSRefProcTaskProxy(ProcessTask&     task,
5703                       CMSCollector*    collector,
5704                       const MemRegion& span,
5705                       CMSBitMap*       mark_bit_map,
5706                       AbstractWorkGang* workers,
5707                       OopTaskQueueSet* task_queues):
5708     // XXX Should superclass AGTWOQ also know about AWG since it knows
5709     // about the task_queues used by the AWG? Then it could initialize
5710     // the terminator() object. See 6984287. The set_for_termination()
5711     // below is a temporary band-aid for the regression in 6984287.
5712     AbstractGangTaskWOopQueues("Process referents by policy in parallel",
5713       task_queues),
5714     _task(task),
5715     _collector(collector), _span(span), _mark_bit_map(mark_bit_map)
5716   {
5717     assert(_collector->_span.equals(_span) && !_span.is_empty(),
5718            "Inconsistency in _span");
5719     set_for_termination(workers->active_workers());
5720   }
5721 
5722   OopTaskQueueSet* task_queues() { return queues(); }
5723 
5724   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5725 
5726   void do_work_steal(int i,
5727                      CMSParDrainMarkingStackClosure* drain,
5728                      CMSParKeepAliveClosure* keep_alive,
5729                      int* seed);
5730 
5731   virtual void work(int i);
5732 };
5733 
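The XXX comment concerns the parallel termination protocol: the gang task owns a ParallelTaskTerminator that must be sized for the number of workers actually participating, and set_for_termination() is the hook that resizes it. A rough sketch of the superclass shape this constructor relies on (member and method names here are assumptions, not verbatim from workgroup.hpp):

// Assumed shape of the superclass, for orientation only:
class AbstractGangTaskWOopQueues : public AbstractGangTask {
  OopTaskQueueSet*       _queues;
  ParallelTaskTerminator _terminator;
public:
  // Called once the number of participating workers is known; without
  // this, the terminator stays sized for the gang's full worker count,
  // which is the 6984287 regression the band-aid above works around.
  virtual void set_for_termination(int active_workers) {
    _terminator.reset_for_reuse(active_workers);
  }
  ParallelTaskTerminator* terminator() { return &_terminator; }
  OopTaskQueueSet*        queues()     { return _queues; }
};
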
5734 void CMSRefProcTaskProxy::work(int i) {
5735   assert(_collector->_span.equals(_span), "Inconsistency in _span");
5736   CMSParKeepAliveClosure par_keep_alive(_collector, _span,
5737                                         _mark_bit_map,
5738                                         &_collector->_revisitStack,
5739                                         work_queue(i));
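
For context, this proxy is driven by the reference-processing task executor: it wraps the ProcessTask, hands it to the work gang, and each gang worker then lands in work(i). A condensed sketch of that call path, assuming the executor holds a CMSCollector reference (accessor names are assumptions based on the surrounding code):

// Condensed sketch of the assumed call path:
void CMSRefProcTaskExecutor::execute(ProcessTask& task) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  CMSRefProcTaskProxy rp_task(task, &_collector,
                              _collector.ref_processor()->span(),
                              _collector.markBitMap(),
                              workers, _collector.task_queues());
  workers->run_task(&rp_task);   // each worker thread enters work(i)
}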