src/share/vm/memory/defNewGeneration.cpp

Print this page
rev 6796 : [mq]: templateOopIterate


  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/shared/collectorCounters.hpp"
  27 #include "gc_implementation/shared/gcPolicyCounters.hpp"
  28 #include "gc_implementation/shared/gcHeapSummary.hpp"
  29 #include "gc_implementation/shared/gcTimer.hpp"
  30 #include "gc_implementation/shared/gcTraceTime.hpp"
  31 #include "gc_implementation/shared/gcTrace.hpp"
  32 #include "gc_implementation/shared/spaceDecorator.hpp"

  33 #include "memory/defNewGeneration.inline.hpp"
  34 #include "memory/gcLocker.inline.hpp"
  35 #include "memory/genCollectedHeap.hpp"
  36 #include "memory/genOopClosures.inline.hpp"
  37 #include "memory/genRemSet.hpp"
  38 #include "memory/generationSpec.hpp"
  39 #include "memory/iterator.hpp"
  40 #include "memory/referencePolicy.hpp"
  41 #include "memory/space.inline.hpp"
  42 #include "oops/instanceRefKlass.hpp"
  43 #include "oops/oop.inline.hpp"
  44 #include "runtime/atomic.inline.hpp"
  45 #include "runtime/java.hpp"
  46 #include "runtime/prefetch.inline.hpp"
  47 #include "runtime/thread.inline.hpp"
  48 #include "utilities/copy.hpp"
  49 #include "utilities/globalDefinitions.hpp"
  50 #include "utilities/stack.inline.hpp"
  51 
  52 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
  53 
  54 //
  55 // DefNewGeneration functions.


  74 
  75 
  76 DefNewGeneration::FastKeepAliveClosure::
  77 FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
  78   DefNewGeneration::KeepAliveClosure(cl) {
  79   _boundary = g->reserved().end();
  80 }
  81 
  82 void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
  83 void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
  84 
  85 DefNewGeneration::EvacuateFollowersClosure::
  86 EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
  87                          ScanClosure* cur, ScanClosure* older) :
  88   _gch(gch), _level(level),
  89   _scan_cur_or_nonheap(cur), _scan_older(older)
  90 {}
  91 
  92 void DefNewGeneration::EvacuateFollowersClosure::do_void() {
  93   do {
  94     _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
  95                                        _scan_older);
  96   } while (!_gch->no_allocs_since_save_marks(_level));
  97 }
  98 
  99 DefNewGeneration::FastEvacuateFollowersClosure::
 100 FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
 101                              DefNewGeneration* gen,
 102                              FastScanClosure* cur, FastScanClosure* older) :
 103   _gch(gch), _level(level), _gen(gen),
 104   _scan_cur_or_nonheap(cur), _scan_older(older)
 105 {}
 106 
 107 void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
 108   do {
 109     _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
 110                                        _scan_older);
 111   } while (!_gch->no_allocs_since_save_marks(_level));
 112   guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
 113 }
 114 
 115 ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
 116     OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
 117 {
 118   assert(_g->level() == 0, "Optimized for youngest generation");
 119   _boundary = _g->reserved().end();
 120 }
 121 
 122 void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
 123 void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }
 124 
 125 FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
 126     OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
 127 {
 128   assert(_g->level() == 0, "Optimized for youngest generation");
 129   _boundary = _g->reserved().end();


 169   assert(_g->level() == 0, "Optimized for youngest generation");
 170   _boundary = _g->reserved().end();
 171 }
 172 
 173 void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
 174 void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }
 175 
 176 void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
 177 void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }
 178 
 179 KlassScanClosure::KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure,
 180                                    KlassRemSet* klass_rem_set)
 181     : _scavenge_closure(scavenge_closure),
 182       _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {}
 183 
 184 
 185 DefNewGeneration::DefNewGeneration(ReservedSpace rs,
 186                                    size_t initial_size,
 187                                    int level,
 188                                    const char* policy)
 189   : Generation(rs, initial_size, level),
 190     _promo_failure_drain_in_progress(false),
 191     _should_allocate_from_space(false)
 192 {
 193   MemRegion cmr((HeapWord*)_virtual_space.low(),
 194                 (HeapWord*)_virtual_space.high());
 195   Universe::heap()->barrier_set()->resize_covered_region(cmr);
 196 
 197   if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
 198     _eden_space = new ConcEdenSpace(this);
 199   } else {
 200     _eden_space = new EdenSpace(this);
 201   }
 202   _from_space = new ContiguousSpace();
 203   _to_space   = new ContiguousSpace();
 204 
 205   if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
 206     vm_exit_during_initialization("Could not allocate a new gen space");
 207 
 208   // Compute the maximum eden and survivor space sizes. These sizes
 209   // are computed assuming the entire reserved space is committed.


 805     const intx interval = PrefetchCopyIntervalInBytes;
 806     Prefetch::write(obj, interval);
 807 
 808     // Copy obj
 809     Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);
 810 
 811     // Increment age if obj still in new generation
 812     obj->incr_age();
 813     age_table()->add(obj, s);
 814   }
 815 
 816   // Done, insert forward pointer to obj in this header
 817   old->forward_to(obj);
 818 
 819   return obj;
 820 }
 821 
 822 void DefNewGeneration::drain_promo_failure_scan_stack() {
 823   while (!_promo_failure_scan_stack.is_empty()) {
 824      oop obj = _promo_failure_scan_stack.pop();
 825      obj->oop_iterate(_promo_failure_scan_stack_closure);
 826   }
 827 }
 828 
// Capture the current allocation top ("saved mark") of each young
// space.  oop_since_save_marks_iterate then visits only objects
// allocated after these marks, and no_allocs_since_save_marks()
// tests against them.
 829 void DefNewGeneration::save_marks() {
 830   eden()->set_saved_mark();
 831   to()->set_saved_mark();
 832   from()->set_saved_mark();
 833 }
 834 
 835 
// Discard the saved marks previously recorded by save_marks() for all
// three young-generation spaces.
 836 void DefNewGeneration::reset_saved_marks() {
 837   eden()->reset_saved_mark();
 838   to()->reset_saved_mark();
 839   from()->reset_saved_mark();
 840 }
 841 
 842 
 843 bool DefNewGeneration::no_allocs_since_save_marks() {
 844   assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
 845   assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
 846   return to()->saved_mark_at_top();
 847 }
 848 
// Generates one oop_since_save_marks_iterate<suffix> definition per
// closure type: each walks the objects allocated since the last
// save_marks() in eden, to- and from-space, bracketed by
// set_generation()/reset_generation() on the closure, then re-records
// the marks so the next round scans only newly allocated objects.
// (No comments on the continuation lines below: a '//' comment would
// swallow the trailing backslash.)
 849 #define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
 850                                                                 \
 851 void DefNewGeneration::                                         \
 852 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
 853   cl->set_generation(this);                                     \
 854   eden()->oop_since_save_marks_iterate##nv_suffix(cl);          \
 855   to()->oop_since_save_marks_iterate##nv_suffix(cl);            \
 856   from()->oop_since_save_marks_iterate##nv_suffix(cl);          \
 857   cl->reset_generation();                                       \
 858   save_marks();                                                 \
 859 }
 860 
// Instantiate one definition for every closure named in the
// ALL_SINCE_SAVE_MARKS_CLOSURES list, then retire the helper macro.
 861 ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)
 862 
 863 #undef DefNew_SINCE_SAVE_MARKS_DEFN
 864 
 865 void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
 866                                          size_t max_alloc_words) {
 867   if (requestor == this || _promotion_failed) return;
 868   assert(requestor->level() > level(), "DefNewGeneration must be youngest");
 869 
 870   /* $$$ Assert this?  "trace" is a "MarkSweep" function so that's not appropriate.
 871   if (to_space->top() > to_space->bottom()) {
 872     trace("to_space not empty when contribute_scratch called");
 873   }
 874   */
 875 
 876   ContiguousSpace* to_space = to();
 877   assert(to_space->end() >= to_space->top(), "pointers out of order");
 878   size_t free_words = pointer_delta(to_space->end(), to_space->top());
 879   if (free_words >= MinFreeScratchWords) {
 880     ScratchBlock* sb = (ScratchBlock*)to_space->top();
 881     sb->num_words = free_words;
 882     sb->next = list;
 883     list = sb;




  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/shared/collectorCounters.hpp"
  27 #include "gc_implementation/shared/gcPolicyCounters.hpp"
  28 #include "gc_implementation/shared/gcHeapSummary.hpp"
  29 #include "gc_implementation/shared/gcTimer.hpp"
  30 #include "gc_implementation/shared/gcTraceTime.hpp"
  31 #include "gc_implementation/shared/gcTrace.hpp"
  32 #include "gc_implementation/shared/spaceDecorator.hpp"
  33 #include "gc_interface/collectedHeap.inline.hpp"
  34 #include "memory/defNewGeneration.inline.hpp"
  35 #include "memory/gcLocker.inline.hpp"
  36 #include "memory/genCollectedHeap.inline.hpp"
  37 #include "memory/genOopClosures.inline.hpp"
  38 #include "memory/genRemSet.hpp"
  39 #include "memory/generationSpec.hpp"
  40 #include "memory/iterator.hpp"
  41 #include "memory/referencePolicy.hpp"
  42 #include "memory/space.inline.hpp"
  43 #include "oops/instanceRefKlass.hpp"
  44 #include "oops/oop.inline.hpp"
  45 #include "runtime/atomic.inline.hpp"
  46 #include "runtime/java.hpp"
  47 #include "runtime/prefetch.inline.hpp"
  48 #include "runtime/thread.inline.hpp"
  49 #include "utilities/copy.hpp"
  50 #include "utilities/globalDefinitions.hpp"
  51 #include "utilities/stack.inline.hpp"
  52 
  53 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
  54 
  55 //
  56 // DefNewGeneration functions.


  75 
  76 
  77 DefNewGeneration::FastKeepAliveClosure::
  78 FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
  79   DefNewGeneration::KeepAliveClosure(cl) {
  80   _boundary = g->reserved().end();
  81 }
  82 
  83 void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
  84 void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
  85 
  86 DefNewGeneration::EvacuateFollowersClosure::
  87 EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
  88                          ScanClosure* cur, ScanClosure* older) :
  89   _gch(gch), _level(level),
  90   _scan_cur_or_nonheap(cur), _scan_older(older)
  91 {}
  92 
  93 void DefNewGeneration::EvacuateFollowersClosure::do_void() {
  94   do {
  95     _gch->gch_oop_since_save_marks_iterate<true>(_level, _scan_cur_or_nonheap,
  96                                        _scan_older);
  97   } while (!_gch->no_allocs_since_save_marks(_level));
  98 }
  99 
 100 DefNewGeneration::FastEvacuateFollowersClosure::
 101 FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
 102                              DefNewGeneration* gen,
 103                              FastScanClosure* cur, FastScanClosure* older) :
 104   _gch(gch), _level(level), _gen(gen),
 105   _scan_cur_or_nonheap(cur), _scan_older(older)
 106 {}
 107 
 108 void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
 109   do {
 110     _gch->gch_oop_since_save_marks_iterate<true>(_level, _scan_cur_or_nonheap,
 111                                        _scan_older);
 112   } while (!_gch->no_allocs_since_save_marks(_level));
 113   guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
 114 }
 115 
 116 ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
 117     OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
 118 {
 119   assert(_g->level() == 0, "Optimized for youngest generation");
 120   _boundary = _g->reserved().end();
 121 }
 122 
 123 void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
 124 void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }
 125 
 126 FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
 127     OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
 128 {
 129   assert(_g->level() == 0, "Optimized for youngest generation");
 130   _boundary = _g->reserved().end();


 170   assert(_g->level() == 0, "Optimized for youngest generation");
 171   _boundary = _g->reserved().end();
 172 }
 173 
 174 void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
 175 void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }
 176 
 177 void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
 178 void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }
 179 
 180 KlassScanClosure::KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure,
 181                                    KlassRemSet* klass_rem_set)
 182     : _scavenge_closure(scavenge_closure),
 183       _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {}
 184 
 185 
 186 DefNewGeneration::DefNewGeneration(ReservedSpace rs,
 187                                    size_t initial_size,
 188                                    int level,
 189                                    const char* policy)
 190   : Generation(rs, initial_size, level, _dispatch_index_generation_def_new),
 191     _promo_failure_drain_in_progress(false),
 192     _should_allocate_from_space(false)
 193 {
 194   MemRegion cmr((HeapWord*)_virtual_space.low(),
 195                 (HeapWord*)_virtual_space.high());
 196   Universe::heap()->barrier_set()->resize_covered_region(cmr);
 197 
 198   if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
 199     _eden_space = new ConcEdenSpace(this);
 200   } else {
 201     _eden_space = new EdenSpace(this);
 202   }
 203   _from_space = new ContiguousSpace();
 204   _to_space   = new ContiguousSpace();
 205 
 206   if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
 207     vm_exit_during_initialization("Could not allocate a new gen space");
 208 
 209   // Compute the maximum eden and survivor space sizes. These sizes
 210   // are computed assuming the entire reserved space is committed.


 806     const intx interval = PrefetchCopyIntervalInBytes;
 807     Prefetch::write(obj, interval);
 808 
 809     // Copy obj
 810     Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);
 811 
 812     // Increment age if obj still in new generation
 813     obj->incr_age();
 814     age_table()->add(obj, s);
 815   }
 816 
 817   // Done, insert forward pointer to obj in this header
 818   old->forward_to(obj);
 819 
 820   return obj;
 821 }
 822 
 823 void DefNewGeneration::drain_promo_failure_scan_stack() {
 824   while (!_promo_failure_scan_stack.is_empty()) {
 825      oop obj = _promo_failure_scan_stack.pop();
 826      obj->oop_iterate<false>(_promo_failure_scan_stack_closure);
 827   }
 828 }
 829 
// Capture the current allocation top ("saved mark") of each young
// space; no_allocs_since_save_marks() later tests against these
// marks.
 830 void DefNewGeneration::save_marks() {
 831   eden()->set_saved_mark();
 832   to()->set_saved_mark();
 833   from()->set_saved_mark();
 834 }
 835 
 836 
// Discard the saved marks previously recorded by save_marks() for all
// three young-generation spaces.
 837 void DefNewGeneration::reset_saved_marks() {
 838   eden()->reset_saved_mark();
 839   to()->reset_saved_mark();
 840   from()->reset_saved_mark();
 841 }
 842 
 843 
 844 bool DefNewGeneration::no_allocs_since_save_marks() {
 845   assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
 846   assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
 847   return to()->saved_mark_at_top();
 848 }
















 849 
 850 void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
 851                                          size_t max_alloc_words) {
 852   if (requestor == this || _promotion_failed) return;
 853   assert(requestor->level() > level(), "DefNewGeneration must be youngest");
 854 
 855   /* $$$ Assert this?  "trace" is a "MarkSweep" function so that's not appropriate.
 856   if (to_space->top() > to_space->bottom()) {
 857     trace("to_space not empty when contribute_scratch called");
 858   }
 859   */
 860 
 861   ContiguousSpace* to_space = to();
 862   assert(to_space->end() >= to_space->top(), "pointers out of order");
 863   size_t free_words = pointer_delta(to_space->end(), to_space->top());
 864   if (free_words >= MinFreeScratchWords) {
 865     ScratchBlock* sb = (ScratchBlock*)to_space->top();
 866     sb->num_words = free_words;
 867     sb->next = list;
 868     list = sb;