< prev index next >

src/share/vm/gc/shared/genCollectedHeap.cpp

Print this page




 145 
 146   return JNI_OK;
 147 }
 148 
 149   char* GenCollectedHeap::allocate(size_t alignment,
 150                                  ReservedSpace* heap_rs){
       // Sums the young and old generations' maximum sizes, checks the sum
       // for overflow and alignment-granularity, reserves that much heap via
       // Universe::reserve_heap(), stores the reservation into *heap_rs, and
       // returns its base address. Exits the VM during initialization if the
       // combined size overflows size_t.
 151   // Now figure out the total size.
 152   const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
       // Heap alignment must be page-granular for the chosen page size.
 153   assert(alignment % pageSize == 0, "Must be");
 154 
 155   GenerationSpec* young_spec = gen_policy()->young_gen_spec();
 156   GenerationSpec* old_spec = gen_policy()->old_gen_spec();
 157 
 158   // Check for overflow.
 159   size_t total_reserved = young_spec->max_size() + old_spec->max_size();
       // Unsigned wrap-around check: the sum overflowed iff it ended up
       // smaller than one of its addends.
 160   if (total_reserved < young_spec->max_size()) {
 161     vm_exit_during_initialization("The size of the object heap + VM data exceeds "
 162                                   "the maximum representable size");
 163   }
       // NOTE(review): presumes each generation's max_size() was already
       // sized as a multiple of the heap alignment — the assert enforces
       // the combined invariant.
 164   assert(total_reserved % alignment == 0,
 165          err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment="
 166                  SIZE_FORMAT, total_reserved, alignment));
 167 
 168   *heap_rs = Universe::reserve_heap(total_reserved, alignment);
 169   return heap_rs->base();
 170 }
 171 
 172 void GenCollectedHeap::post_initialize() {
 173   CollectedHeap::post_initialize();
 174   ref_processing_init();
 175   assert((_young_gen->kind() == Generation::DefNew) ||
 176          (_young_gen->kind() == Generation::ParNew),
 177     "Wrong youngest generation type");
 178   DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;
 179 
 180   assert(_old_gen->kind() == Generation::ConcurrentMarkSweep ||
 181          _old_gen->kind() == Generation::MarkSweepCompact,
 182     "Wrong generation kind");
 183 
 184   _gen_policy->initialize_size_policy(def_new_gen->eden()->capacity(),
 185                                       _old_gen->capacity(),
 186                                       def_new_gen->from()->capacity());


 874   // A scavenge may not have been attempted, or may have
 875   // been attempted and failed, because the old gen was too full
 876   if (local_last_generation == YoungGen && gc_cause() == GCCause::_gc_locker &&
 877       incremental_collection_will_fail(false /* don't consult_young */)) {
 878     if (PrintGCDetails) {
 879       gclog_or_tty->print_cr("GC locker: Trying a full collection "
 880                              "because scavenge failed");
 881     }
 882     // This time allow the old gen to be collected as well
 883     do_collection(true,                // full
 884                   clear_all_soft_refs, // clear_all_soft_refs
 885                   0,                   // size
 886                   false,               // is_tlab
 887                   OldGen);             // last_generation
 888   }
 889 }
 890 
 891 bool GenCollectedHeap::is_in_young(oop p) {
       // Fast membership test: a single address compare against the old
       // gen's reserved start. Valid only while the young generation is
       // reserved below the old generation in the address space; the assert
       // cross-checks the result against the authoritative
       // _young_gen->is_in_reserved(p).
 892   bool result = ((HeapWord*)p) < _old_gen->reserved().start();
 893   assert(result == _young_gen->is_in_reserved(p),
 894          err_msg("incorrect test - result=%d, p=" INTPTR_FORMAT, result, p2i((void*)p)));
 895   return result;
 896 }
 897 
 898 // Returns "TRUE" iff "p" points into the committed areas of the heap.
 899 bool GenCollectedHeap::is_in(const void* p) const {
       // Delegates to each generation's committed-area test in turn.
 900   return _young_gen->is_in(p) || _old_gen->is_in(p);
 901 }
 902 
 903 #ifdef ASSERT
 904 // Don't implement this by using is_in_young().  This method is used
 905 // in some cases to check that is_in_young() is correct.
 906 bool GenCollectedHeap::is_in_partial_collection(const void* p) {
       // Debug-only: true iff p is non-NULL and lies below the young gen's
       // reserved end. Callers must pass NULL or an in-heap address — the
       // assert rejects non-NULL addresses outside the reservation.
 907   assert(is_in_reserved(p) || p == NULL,
 908     "Does not work if address is non-null and outside of the heap");
 909   return p < _young_gen->reserved().end() && p != NULL;
 910 }
 911 #endif
 912 
 913 void GenCollectedHeap::oop_iterate_no_header(OopClosure* cl) {
 914   NoHeaderExtendedOopClosure no_header_cl(cl);




 145 
 146   return JNI_OK;
 147 }
 148 
 149   char* GenCollectedHeap::allocate(size_t alignment,
 150                                  ReservedSpace* heap_rs){
       // Sums the young and old generations' maximum sizes, checks the sum
       // for overflow and alignment-granularity, reserves that much heap via
       // Universe::reserve_heap(), stores the reservation into *heap_rs, and
       // returns its base address. Exits the VM during initialization if the
       // combined size overflows size_t.
 151   // Now figure out the total size.
 152   const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
       // Heap alignment must be page-granular for the chosen page size.
 153   assert(alignment % pageSize == 0, "Must be");
 154 
 155   GenerationSpec* young_spec = gen_policy()->young_gen_spec();
 156   GenerationSpec* old_spec = gen_policy()->old_gen_spec();
 157 
 158   // Check for overflow.
 159   size_t total_reserved = young_spec->max_size() + old_spec->max_size();
       // Unsigned wrap-around check: the sum overflowed iff it ended up
       // smaller than one of its addends.
 160   if (total_reserved < young_spec->max_size()) {
 161     vm_exit_during_initialization("The size of the object heap + VM data exceeds "
 162                                   "the maximum representable size");
 163   }
       // NOTE(review): presumes each generation's max_size() was already
       // sized as a multiple of the heap alignment — the assert enforces
       // the combined invariant.
 164   assert(total_reserved % alignment == 0,
 165          "Gen size; total_reserved=" SIZE_FORMAT ", alignment="
 166          SIZE_FORMAT, total_reserved, alignment);
 167 
 168   *heap_rs = Universe::reserve_heap(total_reserved, alignment);
 169   return heap_rs->base();
 170 }
 171 
 172 void GenCollectedHeap::post_initialize() {
 173   CollectedHeap::post_initialize();
 174   ref_processing_init();
 175   assert((_young_gen->kind() == Generation::DefNew) ||
 176          (_young_gen->kind() == Generation::ParNew),
 177     "Wrong youngest generation type");
 178   DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;
 179 
 180   assert(_old_gen->kind() == Generation::ConcurrentMarkSweep ||
 181          _old_gen->kind() == Generation::MarkSweepCompact,
 182     "Wrong generation kind");
 183 
 184   _gen_policy->initialize_size_policy(def_new_gen->eden()->capacity(),
 185                                       _old_gen->capacity(),
 186                                       def_new_gen->from()->capacity());


 874   // A scavenge may not have been attempted, or may have
 875   // been attempted and failed, because the old gen was too full
 876   if (local_last_generation == YoungGen && gc_cause() == GCCause::_gc_locker &&
 877       incremental_collection_will_fail(false /* don't consult_young */)) {
 878     if (PrintGCDetails) {
 879       gclog_or_tty->print_cr("GC locker: Trying a full collection "
 880                              "because scavenge failed");
 881     }
 882     // This time allow the old gen to be collected as well
 883     do_collection(true,                // full
 884                   clear_all_soft_refs, // clear_all_soft_refs
 885                   0,                   // size
 886                   false,               // is_tlab
 887                   OldGen);             // last_generation
 888   }
 889 }
 890 
 891 bool GenCollectedHeap::is_in_young(oop p) {
       // Fast membership test: a single address compare against the old
       // gen's reserved start. Valid only while the young generation is
       // reserved below the old generation in the address space; the assert
       // cross-checks the result against the authoritative
       // _young_gen->is_in_reserved(p).
 892   bool result = ((HeapWord*)p) < _old_gen->reserved().start();
 893   assert(result == _young_gen->is_in_reserved(p),
 894          "incorrect test - result=%d, p=" INTPTR_FORMAT, result, p2i((void*)p));
 895   return result;
 896 }
 897 
 898 // Returns "TRUE" iff "p" points into the committed areas of the heap.
 899 bool GenCollectedHeap::is_in(const void* p) const {
       // Delegates to each generation's committed-area test in turn.
 900   return _young_gen->is_in(p) || _old_gen->is_in(p);
 901 }
 902 
 903 #ifdef ASSERT
 904 // Don't implement this by using is_in_young().  This method is used
 905 // in some cases to check that is_in_young() is correct.
 906 bool GenCollectedHeap::is_in_partial_collection(const void* p) {
       // Debug-only: true iff p is non-NULL and lies below the young gen's
       // reserved end. Callers must pass NULL or an in-heap address — the
       // assert rejects non-NULL addresses outside the reservation.
 907   assert(is_in_reserved(p) || p == NULL,
 908     "Does not work if address is non-null and outside of the heap");
 909   return p < _young_gen->reserved().end() && p != NULL;
 910 }
 911 #endif
 912 
 913 void GenCollectedHeap::oop_iterate_no_header(OopClosure* cl) {
 914   NoHeaderExtendedOopClosure no_header_cl(cl);


< prev index next >