
src/share/vm/gc/cms/parNewGeneration.cpp

rev 9245 : [mq]: 8141134-remove-unncessary-pragmas
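
The change removes the MSVC-only #ifdef _MSC_VER / #pragma warning(disable:4355) guards around the ParScanThreadState and ParNewGeneration constructors. Warning C4355 ("'this' : used in base member initializer list") fires when a constructor hands this to a member before the enclosing object is fully constructed, which is exactly what these constructors do when wiring up their closure members. Below is a minimal standalone sketch of that pattern; Owner and Closure are hypothetical stand-ins, not HotSpot types.

  // Minimal sketch (hypothetical Owner/Closure, not HotSpot code) of the pattern
  // the removed pragmas guarded: a member closure keeps a back-pointer to its
  // enclosing object, so 'this' appears in the member initializer list.
  struct Owner;

  struct Closure {
    Owner* _owner;                            // only stored, never dereferenced here
    explicit Closure(Owner* o) : _owner(o) {}
  };

  struct Owner {
    Closure _closure;
    // MSVC's C4355 can warn that 'this' is used before Owner is fully constructed;
    // the usage is safe because Closure merely records the pointer.
    Owner() : _closure(this) {}
  };

  int main() {
    Owner o;                                  // constructs fine without any pragma
    (void)o;
    return 0;
  }

Since current MSVC releases do not appear to emit C4355 for this pattern by default, the push/disable/pop guards add noise without value, which is presumably why this change drops them.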


  40 #include "gc/shared/generation.hpp"
  41 #include "gc/shared/plab.inline.hpp"
  42 #include "gc/shared/referencePolicy.hpp"
  43 #include "gc/shared/space.hpp"
  44 #include "gc/shared/spaceDecorator.hpp"
  45 #include "gc/shared/strongRootsScope.hpp"
  46 #include "gc/shared/taskqueue.inline.hpp"
  47 #include "gc/shared/workgroup.hpp"
  48 #include "memory/resourceArea.hpp"
  49 #include "oops/objArrayOop.hpp"
  50 #include "oops/oop.inline.hpp"
  51 #include "runtime/atomic.inline.hpp"
  52 #include "runtime/handles.hpp"
  53 #include "runtime/handles.inline.hpp"
  54 #include "runtime/java.hpp"
  55 #include "runtime/thread.inline.hpp"
  56 #include "utilities/copy.hpp"
  57 #include "utilities/globalDefinitions.hpp"
  58 #include "utilities/stack.inline.hpp"
  59 
  60 #ifdef _MSC_VER
  61 #pragma warning( push )
  62 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
  63 #endif
  64 ParScanThreadState::ParScanThreadState(Space* to_space_,
  65                                        ParNewGeneration* young_gen_,
  66                                        Generation* old_gen_,
  67                                        int thread_num_,
  68                                        ObjToScanQueueSet* work_queue_set_,
  69                                        Stack<oop, mtGC>* overflow_stacks_,
  70                                        size_t desired_plab_sz_,
  71                                        ParallelTaskTerminator& term_) :
  72   _to_space(to_space_),
  73   _old_gen(old_gen_),
  74   _young_gen(young_gen_),
  75   _thread_num(thread_num_),
  76   _work_queue(work_queue_set_->queue(thread_num_)),
  77   _to_space_full(false),
  78   _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
  79   _ageTable(false), // false ==> not the global age table, no perf data.
  80   _to_space_alloc_buffer(desired_plab_sz_),
  81   _to_space_closure(young_gen_, this),
  82   _old_gen_closure(young_gen_, this),
  83   _to_space_root_closure(young_gen_, this),


  87                       &_to_space_root_closure, young_gen_, &_old_gen_root_closure,
  88                       work_queue_set_, &term_),
  89   _is_alive_closure(young_gen_),
  90   _scan_weak_ref_closure(young_gen_, this),
  91   _keep_alive_closure(&_scan_weak_ref_closure),
  92   _strong_roots_time(0.0),
  93   _term_time(0.0)
  94 {
  95   #if TASKQUEUE_STATS
  96   _term_attempts = 0;
  97   _overflow_refills = 0;
  98   _overflow_refill_objs = 0;
  99   #endif // TASKQUEUE_STATS
 100 
 101   _survivor_chunk_array = (ChunkArray*) old_gen()->get_data_recorder(thread_num());
 102   _hash_seed = 17;  // Might want to take time-based random value.
 103   _start = os::elapsedTime();
 104   _old_gen_closure.set_generation(old_gen_);
 105   _old_gen_root_closure.set_generation(old_gen_);
 106 }
 107 #ifdef _MSC_VER
 108 #pragma warning( pop )
 109 #endif
 110 
 111 void ParScanThreadState::record_survivor_plab(HeapWord* plab_start,
 112                                               size_t plab_word_size) {
 113   ChunkArray* sca = survivor_chunk_array();
 114   if (sca != NULL) {
 115     // A non-null SCA implies that we want the PLAB data recorded.
 116     sca->record_sample(plab_start, plab_word_size);
 117   }
 118 }
 119 
 120 bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) const {
 121   return new_obj->is_objArray() &&
 122          arrayOop(new_obj)->length() > ParGCArrayScanChunk &&
 123          new_obj != old_obj;
 124 }
 125 
 126 void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
 127   assert(old->is_objArray(), "must be obj array");
 128   assert(old->is_forwarded(), "must be forwarded");
 129   assert(GenCollectedHeap::heap()->is_in_reserved(old), "must be in heap.");


 580   CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
 581                                            &par_scan_state.to_space_root_closure(),
 582                                            false);
 583 
 584   par_scan_state.start_strong_roots();
 585   gch->gen_process_roots(_strong_roots_scope,
 586                          GenCollectedHeap::YoungGen,
 587                          true,  // Process younger gens, if any, as strong roots.
 588                          GenCollectedHeap::SO_ScavengeCodeCache,
 589                          GenCollectedHeap::StrongAndWeakRoots,
 590                          &par_scan_state.to_space_root_closure(),
 591                          &par_scan_state.older_gen_closure(),
 592                          &cld_scan_closure);
 593 
 594   par_scan_state.end_strong_roots();
 595 
 596   // "evacuate followers".
 597   par_scan_state.evacuate_followers_closure().do_void();
 598 }
 599 
 600 #ifdef _MSC_VER
 601 #pragma warning( push )
 602 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 603 #endif
 604 ParNewGeneration::ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
 605   : DefNewGeneration(rs, initial_byte_size, "PCopy"),
 606   _overflow_list(NULL),
 607   _is_alive_closure(this),
 608   _plab_stats(YoungPLABSize, PLABWeight)
 609 {
 610   NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
 611   NOT_PRODUCT(_num_par_pushes = 0;)
 612   _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
 613   guarantee(_task_queues != NULL, "task_queues allocation failure.");
 614 
 615   for (uint i = 0; i < ParallelGCThreads; i++) {
 616     ObjToScanQueue *q = new ObjToScanQueue();
 617     guarantee(q != NULL, "work_queue Allocation failure.");
 618     _task_queues->register_queue(i, q);
 619   }
 620 
 621   for (uint i = 0; i < ParallelGCThreads; i++) {
 622     _task_queues->queue(i)->initialize();
 623   }


 626   if (ParGCUseLocalOverflow) {
 627     // typedef to work around the NEW_C_HEAP_ARRAY macro, which cannot deal with ','
 628     typedef Stack<oop, mtGC> GCOopStack;
 629 
 630     _overflow_stacks = NEW_C_HEAP_ARRAY(GCOopStack, ParallelGCThreads, mtGC);
 631     for (size_t i = 0; i < ParallelGCThreads; ++i) {
 632       new (_overflow_stacks + i) Stack<oop, mtGC>();
 633     }
 634   }
 635 
 636   if (UsePerfData) {
 637     EXCEPTION_MARK;
 638     ResourceMark rm;
 639 
 640     const char* cname =
 641          PerfDataManager::counter_name(_gen_counters->name_space(), "threads");
 642     PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None,
 643                                      ParallelGCThreads, CHECK);
 644   }
 645 }
 646 #ifdef _MSC_VER
 647 #pragma warning( pop )
 648 #endif
 649 
 650 // ParNewGeneration::
 651 ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
 652   DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}
 653 
 654 template <class T>
 655 void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
 656 #ifdef ASSERT
 657   {
 658     assert(!oopDesc::is_null(*p), "expected non-null ref");
 659     oop obj = oopDesc::load_decode_heap_oop_not_null(p);
 660     // We never expect to see a null reference being processed
 661     // as a weak reference.
 662     assert(obj->is_oop(), "expected an oop while scanning weak refs");
 663   }
 664 #endif // ASSERT
 665 
 666   _par_cl->do_oop_nv(p);
 667 
 668   if (GenCollectedHeap::heap()->is_in_reserved(p)) {




  40 #include "gc/shared/generation.hpp"
  41 #include "gc/shared/plab.inline.hpp"
  42 #include "gc/shared/referencePolicy.hpp"
  43 #include "gc/shared/space.hpp"
  44 #include "gc/shared/spaceDecorator.hpp"
  45 #include "gc/shared/strongRootsScope.hpp"
  46 #include "gc/shared/taskqueue.inline.hpp"
  47 #include "gc/shared/workgroup.hpp"
  48 #include "memory/resourceArea.hpp"
  49 #include "oops/objArrayOop.hpp"
  50 #include "oops/oop.inline.hpp"
  51 #include "runtime/atomic.inline.hpp"
  52 #include "runtime/handles.hpp"
  53 #include "runtime/handles.inline.hpp"
  54 #include "runtime/java.hpp"
  55 #include "runtime/thread.inline.hpp"
  56 #include "utilities/copy.hpp"
  57 #include "utilities/globalDefinitions.hpp"
  58 #include "utilities/stack.inline.hpp"
  59 




  60 ParScanThreadState::ParScanThreadState(Space* to_space_,
  61                                        ParNewGeneration* young_gen_,
  62                                        Generation* old_gen_,
  63                                        int thread_num_,
  64                                        ObjToScanQueueSet* work_queue_set_,
  65                                        Stack<oop, mtGC>* overflow_stacks_,
  66                                        size_t desired_plab_sz_,
  67                                        ParallelTaskTerminator& term_) :
  68   _to_space(to_space_),
  69   _old_gen(old_gen_),
  70   _young_gen(young_gen_),
  71   _thread_num(thread_num_),
  72   _work_queue(work_queue_set_->queue(thread_num_)),
  73   _to_space_full(false),
  74   _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
  75   _ageTable(false), // false ==> not the global age table, no perf data.
  76   _to_space_alloc_buffer(desired_plab_sz_),
  77   _to_space_closure(young_gen_, this),
  78   _old_gen_closure(young_gen_, this),
  79   _to_space_root_closure(young_gen_, this),


  83                       &_to_space_root_closure, young_gen_, &_old_gen_root_closure,
  84                       work_queue_set_, &term_),
  85   _is_alive_closure(young_gen_),
  86   _scan_weak_ref_closure(young_gen_, this),
  87   _keep_alive_closure(&_scan_weak_ref_closure),
  88   _strong_roots_time(0.0),
  89   _term_time(0.0)
  90 {
  91   #if TASKQUEUE_STATS
  92   _term_attempts = 0;
  93   _overflow_refills = 0;
  94   _overflow_refill_objs = 0;
  95   #endif // TASKQUEUE_STATS
  96 
  97   _survivor_chunk_array = (ChunkArray*) old_gen()->get_data_recorder(thread_num());
  98   _hash_seed = 17;  // Might want to take time-based random value.
  99   _start = os::elapsedTime();
 100   _old_gen_closure.set_generation(old_gen_);
 101   _old_gen_root_closure.set_generation(old_gen_);
 102 }



 103 
 104 void ParScanThreadState::record_survivor_plab(HeapWord* plab_start,
 105                                               size_t plab_word_size) {
 106   ChunkArray* sca = survivor_chunk_array();
 107   if (sca != NULL) {
 108     // A non-null SCA implies that we want the PLAB data recorded.
 109     sca->record_sample(plab_start, plab_word_size);
 110   }
 111 }
 112 
 113 bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) const {
 114   return new_obj->is_objArray() &&
 115          arrayOop(new_obj)->length() > ParGCArrayScanChunk &&
 116          new_obj != old_obj;
 117 }
 118 
 119 void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
 120   assert(old->is_objArray(), "must be obj array");
 121   assert(old->is_forwarded(), "must be forwarded");
 122   assert(GenCollectedHeap::heap()->is_in_reserved(old), "must be in heap.");


 573   CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
 574                                            &par_scan_state.to_space_root_closure(),
 575                                            false);
 576 
 577   par_scan_state.start_strong_roots();
 578   gch->gen_process_roots(_strong_roots_scope,
 579                          GenCollectedHeap::YoungGen,
 580                          true,  // Process younger gens, if any, as strong roots.
 581                          GenCollectedHeap::SO_ScavengeCodeCache,
 582                          GenCollectedHeap::StrongAndWeakRoots,
 583                          &par_scan_state.to_space_root_closure(),
 584                          &par_scan_state.older_gen_closure(),
 585                          &cld_scan_closure);
 586 
 587   par_scan_state.end_strong_roots();
 588 
 589   // "evacuate followers".
 590   par_scan_state.evacuate_followers_closure().do_void();
 591 }
 592 




 593 ParNewGeneration::ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
 594   : DefNewGeneration(rs, initial_byte_size, "PCopy"),
 595   _overflow_list(NULL),
 596   _is_alive_closure(this),
 597   _plab_stats(YoungPLABSize, PLABWeight)
 598 {
 599   NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
 600   NOT_PRODUCT(_num_par_pushes = 0;)
 601   _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
 602   guarantee(_task_queues != NULL, "task_queues allocation failure.");
 603 
 604   for (uint i = 0; i < ParallelGCThreads; i++) {
 605     ObjToScanQueue *q = new ObjToScanQueue();
 606     guarantee(q != NULL, "work_queue Allocation failure.");
 607     _task_queues->register_queue(i, q);
 608   }
 609 
 610   for (uint i = 0; i < ParallelGCThreads; i++) {
 611     _task_queues->queue(i)->initialize();
 612   }


 615   if (ParGCUseLocalOverflow) {
 616     // typedef to work around the NEW_C_HEAP_ARRAY macro, which cannot deal with ','
 617     typedef Stack<oop, mtGC> GCOopStack;
 618 
 619     _overflow_stacks = NEW_C_HEAP_ARRAY(GCOopStack, ParallelGCThreads, mtGC);
 620     for (size_t i = 0; i < ParallelGCThreads; ++i) {
 621       new (_overflow_stacks + i) Stack<oop, mtGC>();
 622     }
 623   }
 624 
 625   if (UsePerfData) {
 626     EXCEPTION_MARK;
 627     ResourceMark rm;
 628 
 629     const char* cname =
 630          PerfDataManager::counter_name(_gen_counters->name_space(), "threads");
 631     PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None,
 632                                      ParallelGCThreads, CHECK);
 633   }
 634 }



 635 
 636 // ParNewGeneration::
 637 ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
 638   DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}
 639 
 640 template <class T>
 641 void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
 642 #ifdef ASSERT
 643   {
 644     assert(!oopDesc::is_null(*p), "expected non-null ref");
 645     oop obj = oopDesc::load_decode_heap_oop_not_null(p);
 646     // We never expect to see a null reference being processed
 647     // as a weak reference.
 648     assert(obj->is_oop(), "expected an oop while scanning weak refs");
 649   }
 650 #endif // ASSERT
 651 
 652   _par_cl->do_oop_nv(p);
 653 
 654   if (GenCollectedHeap::heap()->is_in_reserved(p)) {
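
A second detail of the ParNewGeneration constructor worth a standalone illustration is the two-step initialization of the overflow stacks: NEW_C_HEAP_ARRAY hands back raw C-heap storage, and a loop then placement-news a Stack<oop, mtGC> into each slot. The sketch below mirrors that allocate-then-construct-in-place idiom with plain malloc/free and a stand-in stack type; none of the names are HotSpot APIs.

  // Allocate raw storage, then run each constructor in place with placement new,
  // mirroring the overflow-stack setup in ParNewGeneration's constructor.
  #include <cstddef>
  #include <cstdlib>
  #include <new>
  #include <vector>

  struct OverflowStack {                    // stand-in for Stack<oop, mtGC>
    std::vector<long> _elems;
  };

  int main() {
    const std::size_t nthreads = 4;         // stand-in for ParallelGCThreads
    void* raw = std::malloc(sizeof(OverflowStack) * nthreads);
    OverflowStack* stacks = static_cast<OverflowStack*>(raw);
    // Placement new constructs each element in the already-allocated storage.
    for (std::size_t i = 0; i < nthreads; ++i) {
      new (stacks + i) OverflowStack();
    }
    // Destructors must be run explicitly before the raw block is freed.
    for (std::size_t i = 0; i < nthreads; ++i) {
      stacks[i].~OverflowStack();
    }
    std::free(raw);
    return 0;
  }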

