
src/share/vm/memory/defNewGeneration.cpp

rev 7209 : [mq]: inccms


 177 void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }
 178 
 179 KlassScanClosure::KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure,
 180                                    KlassRemSet* klass_rem_set)
 181     : _scavenge_closure(scavenge_closure),
 182       _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {}
 183 
 184 
 185 DefNewGeneration::DefNewGeneration(ReservedSpace rs,
 186                                    size_t initial_size,
 187                                    int level,
 188                                    const char* policy)
 189   : Generation(rs, initial_size, level),
 190     _promo_failure_drain_in_progress(false),
 191     _should_allocate_from_space(false)
 192 {
 193   MemRegion cmr((HeapWord*)_virtual_space.low(),
 194                 (HeapWord*)_virtual_space.high());
 195   Universe::heap()->barrier_set()->resize_covered_region(cmr);
 196 
 197   if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
 198     _eden_space = new ConcEdenSpace(this);
 199   } else {
 200     _eden_space = new EdenSpace(this);
 201   }
 202   _from_space = new ContiguousSpace();
 203   _to_space   = new ContiguousSpace();
 204 
 205   if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
 206     vm_exit_during_initialization("Could not allocate a new gen space");
 207 
 208   // Compute the maximum eden and survivor space sizes. These sizes
 209   // are computed assuming the entire reserved space is committed.
 210   // These values are exported as performance counters.
 211   uintx alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
 212   uintx size = _virtual_space.reserved_size();
 213   _max_survivor_size = compute_survivor_size(size, alignment);
 214   _max_eden_size = size - (2*_max_survivor_size);
 215 
 216   // allocate the performance counters
 217   GenCollectorPolicy* gcp = (GenCollectorPolicy*) GenCollectedHeap::heap()->collector_policy();
 218 
 219   // Generation counters -- generation 0, 3 subspaces
 220   _gen_counters = new GenerationCounters("new", 0, 3,
 221       gcp->min_young_size(), gcp->max_young_size(), &_virtual_space);
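For context, here is a worked sketch of the sizing arithmetic at lines 211-214, assuming compute_survivor_size reserves one part per survivor space out of SurvivorRatio + 2 parts and aligns the result down. The helper bodies, the 64M reservation and the 64K alignment below are illustrative assumptions, not values taken from this file.

#include <cstdio>
#include <cstddef>

// Hypothetical stand-in for the HotSpot alignment helper.
static size_t align_size_down(size_t size, size_t alignment) {
  return size - (size % alignment);
}

// Assumed shape of compute_survivor_size: one part per survivor
// space out of survivor_ratio + 2 parts total, aligned down.
static size_t compute_survivor_size(size_t gen_size, size_t alignment,
                                    size_t survivor_ratio) {
  size_t n = gen_size / (survivor_ratio + 2);
  return n > alignment ? align_size_down(n, alignment) : alignment;
}

int main() {
  const size_t M = 1024 * 1024;
  size_t reserved = 64 * M;      // illustrative reserved young-gen size
  size_t alignment = 64 * 1024;  // illustrative space alignment
  size_t survivor_ratio = 8;     // default SurvivorRatio of this era

  size_t max_survivor = compute_survivor_size(reserved, alignment, survivor_ratio);
  size_t max_eden = reserved - 2 * max_survivor;  // mirrors line 214 above

  printf("survivor: %zu KB, eden: %zu KB\n", max_survivor / 1024, max_eden / 1024);
  return 0;
}

With these inputs each survivor space gets roughly a tenth of the reservation and eden the remainder, which is the split the performance counters expose.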


1021   return "def new generation";
1022 }
1023 
1024 // Moved from inline file as they are not called inline
1025 CompactibleSpace* DefNewGeneration::first_compaction_space() const {
1026   return eden();
1027 }
1028 
1029 HeapWord* DefNewGeneration::allocate(size_t word_size,
1030                                      bool is_tlab) {
1031   // This is the slow-path allocation for the DefNewGeneration.
1032   // Most allocations are fast-path in compiled code.
1033   // We try to allocate from the eden.  If that works, we are happy.
1034   // Note that since DefNewGeneration supports lock-free allocation, we
1035   // have to use it here, as well.
1036   HeapWord* result = eden()->par_allocate(word_size);
1037   if (result != NULL) {
1038     if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
1039       _next_gen->sample_eden_chunk();
1040     }
1041     return result;
1042   }
1043   do {
1044     HeapWord* old_limit = eden()->soft_end();
1045     if (old_limit < eden()->end()) {
1046       // Tell the next generation we reached a limit.
1047       HeapWord* new_limit =
1048         next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
1049       if (new_limit != NULL) {
1050         Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
1051       } else {
1052         assert(eden()->soft_end() == eden()->end(),
1053                "invalid state after allocation_limit_reached returned null");
1054       }
1055     } else {
1056       // The allocation failed and the soft limit is equal to the hard limit,
1057       // so there is no reason to retry the allocation.
1058       assert(old_limit == eden()->end(), "sanity check");
1059       break;
1060     }
1061     // Retry the allocation until it succeeds or the soft limit can no longer be adjusted.
1062     result = eden()->par_allocate(word_size);
1063   } while (result == NULL);
1064 
1065   // If the eden is full and the last collection bailed out, we are running
1066   // out of heap space, so we try to allocate from the from-space, too.
1067   // allocate_from_space can't be inlined because that would introduce a
1068   // circular dependency at compile time.
1069   if (result == NULL) {
1070     result = allocate_from_space(word_size);
1071   } else if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
1072     _next_gen->sample_eden_chunk();
1073   }
1074   return result;
1075 }
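A note on the loop above: the Atomic::cmpxchg_ptr result at line 1050 is deliberately ignored, because a failed exchange means another thread already advanced the soft end, and the retried par_allocate benefits either way. Below is a minimal sketch of the same retry pattern using std::atomic; Space, next_gen_grant and the field names are hypothetical stand-ins, not HotSpot API.

#include <atomic>
#include <cstddef>

// Hypothetical stand-ins; none of this is HotSpot API.
struct Space {
  std::atomic<char*> top;       // allocation pointer (bumped by par_allocate)
  std::atomic<char*> soft_end;  // current soft allocation limit
  char* hard_end;               // end of committed memory

  char* par_allocate(std::size_t bytes);  // lock-free bump allocation
};

// Stand-in for allocation_limit_reached(): returns a new soft limit,
// or nullptr once the soft limit already equals the hard limit.
char* next_gen_grant(Space* eden, std::size_t bytes);

char* allocate_slow(Space* eden, std::size_t bytes) {
  char* result = nullptr;  // this path is reached after a failed fast-path allocation
  do {
    char* old_limit = eden->soft_end.load();
    if (old_limit == eden->hard_end) {
      break;  // soft limit == hard limit: nothing left to grant
    }
    char* new_limit = next_gen_grant(eden, bytes);
    if (new_limit != nullptr) {
      // Ignore the CAS result on purpose: if it fails, a concurrent
      // thread advanced the limit, which helps this thread just as much.
      eden->soft_end.compare_exchange_strong(old_limit, new_limit);
    }
    result = eden->par_allocate(bytes);
  } while (result == nullptr);
  return result;
}

As in the code above, a null grant is only handed out once the soft limit already equals the hard limit, so the following iteration exits the loop.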
1076 
1077 HeapWord* DefNewGeneration::par_allocate(size_t word_size,
1078                                          bool is_tlab) {
1079   HeapWord* res = eden()->par_allocate(word_size);
1080   if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
1081     _next_gen->sample_eden_chunk();
1082   }
1083   return res;
1084 }
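The fast path relies on eden()->par_allocate being lock-free: a CAS-bumped top pointer, so concurrent mutator threads never receive overlapping blocks. A self-contained sketch of that idea, not the actual ContiguousSpace implementation:

#include <atomic>
#include <cstddef>

struct BumpSpace {
  std::atomic<char*> top;  // next free byte
  char* end;               // first byte past the space

  // Lock-free bump-pointer allocation: returns nullptr when the
  // request does not fit, otherwise claims [old_top, old_top + bytes).
  char* par_allocate(std::size_t bytes) {
    char* old_top = top.load(std::memory_order_relaxed);
    do {
      if (static_cast<std::size_t>(end - old_top) < bytes) {
        return nullptr;  // space exhausted for this request
      }
      // compare_exchange_weak reloads old_top on failure, so each
      // retry re-checks the limit against the value another thread won.
    } while (!top.compare_exchange_weak(old_top, old_top + bytes));
    return old_top;
  }
};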
1085 
1086 void DefNewGeneration::gc_prologue(bool full) {
1087   // Ensure that _end and _soft_end are the same in eden space.
1088   eden()->set_soft_end(eden()->end());
1089 }
1090 
1091 size_t DefNewGeneration::tlab_capacity() const {
1092   return eden()->capacity();
1093 }
1094 
1095 size_t DefNewGeneration::tlab_used() const {
1096   return eden()->used();
1097 }
1098 
1099 size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
1100   return unsafe_max_alloc_nogc();
1101 }


 177 void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }
 178 
 179 KlassScanClosure::KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure,
 180                                    KlassRemSet* klass_rem_set)
 181     : _scavenge_closure(scavenge_closure),
 182       _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {}
 183 
 184 
 185 DefNewGeneration::DefNewGeneration(ReservedSpace rs,
 186                                    size_t initial_size,
 187                                    int level,
 188                                    const char* policy)
 189   : Generation(rs, initial_size, level),
 190     _promo_failure_drain_in_progress(false),
 191     _should_allocate_from_space(false)
 192 {
 193   MemRegion cmr((HeapWord*)_virtual_space.low(),
 194                 (HeapWord*)_virtual_space.high());
 195   Universe::heap()->barrier_set()->resize_covered_region(cmr);
 196 
 197   _eden_space = new ContiguousSpace();
 198   _from_space = new ContiguousSpace();
 199   _to_space   = new ContiguousSpace();
 200 
 201   if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
 202     vm_exit_during_initialization("Could not allocate a new gen space");
 203 
 204   // Compute the maximum eden and survivor space sizes. These sizes
 205   // are computed assuming the entire reserved space is committed.
 206   // These values are exported as performance counters.
 207   uintx alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
 208   uintx size = _virtual_space.reserved_size();
 209   _max_survivor_size = compute_survivor_size(size, alignment);
 210   _max_eden_size = size - (2*_max_survivor_size);
 211 
 212   // allocate the performance counters
 213   GenCollectorPolicy* gcp = (GenCollectorPolicy*) GenCollectedHeap::heap()->collector_policy();
 214 
 215   // Generation counters -- generation 0, 3 subspaces
 216   _gen_counters = new GenerationCounters("new", 0, 3,
 217       gcp->min_young_size(), gcp->max_young_size(), &_virtual_space);


1017   return "def new generation";
1018 }
1019 
1020 // Moved from inline file as they are not called inline
1021 CompactibleSpace* DefNewGeneration::first_compaction_space() const {
1022   return eden();
1023 }
1024 
1025 HeapWord* DefNewGeneration::allocate(size_t word_size,
1026                                      bool is_tlab) {
1027   // This is the slow-path allocation for the DefNewGeneration.
1028   // Most allocations are fast-path in compiled code.
1029   // We try to allocate from the eden.  If that works, we are happy.
1030   // Note that since DefNewGeneration supports lock-free allocation, we
1031   // have to use it here, as well.
1032   HeapWord* result = eden()->par_allocate(word_size);
1033   if (result != NULL) {
1034     if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
1035       _next_gen->sample_eden_chunk();
1036     }
1037   } else {
1038     // If the eden is full and the last collection bailed out, we are running
1039     // out of heap space, so we try to allocate from the from-space, too.
1040     // allocate_from_space can't be inlined because that would introduce a
1041     // circular dependency at compile time.
1042     result = allocate_from_space(word_size);
1043   }
1044   return result;
1045 }
1046 
1047 HeapWord* DefNewGeneration::par_allocate(size_t word_size,
1048                                          bool is_tlab) {
1049   HeapWord* res = eden()->par_allocate(word_size);
1050   if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
1051     _next_gen->sample_eden_chunk();
1052   }
1053   return res;
1054 }
1055 
1056 size_t DefNewGeneration::tlab_capacity() const {
1057   return eden()->capacity();
1058 }
1059 
1060 size_t DefNewGeneration::tlab_used() const {
1061   return eden()->used();
1062 }
1063 
1064 size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
1065   return unsafe_max_alloc_nogc();
1066 }