< prev index next >

src/hotspot/share/gc/shared/genCollectedHeap.cpp

Print this page




// NOTE(review): tail of the heap-reservation routine — the enclosing
// function's signature and earlier body lie outside this excerpt.
 133 
  // Log the chosen page sizing and the reserved [base, base+size) range
  // for the "Heap" area.
 134   os::trace_page_sizes("Heap",
 135                        collector_policy()->min_heap_byte_size(),
 136                        total_reserved,
 137                        alignment,
 138                        heap_rs->base(),
 139                        heap_rs->size());
 140 
  // Hand the base of the reserved space back to the caller.
 141   return heap_rs->base();
 142 }
 143 
 144 void GenCollectedHeap::post_initialize() {
 145   CollectedHeap::post_initialize();
 146   ref_processing_init();
 147   check_gen_kinds();
 148   DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;
 149 
 150   _gen_policy->initialize_size_policy(def_new_gen->eden()->capacity(),
 151                                       _old_gen->capacity(),
 152                                       def_new_gen->from()->capacity());




 153   _gen_policy->initialize_gc_policy_counters();
 154 }
 155 
 156 void GenCollectedHeap::ref_processing_init() {
 157   _young_gen->ref_processor_init();
 158   _old_gen->ref_processor_init();
 159 }
 160 
 161 size_t GenCollectedHeap::capacity() const {
 162   return _young_gen->capacity() + _old_gen->capacity();
 163 }
 164 
 165 size_t GenCollectedHeap::used() const {
 166   return _young_gen->used() + _old_gen->used();
 167 }
 168 
 169 void GenCollectedHeap::save_used_regions() {
 170   _old_gen->save_used_region();
 171   _young_gen->save_used_region();
 172 }


// NOTE(review): excerpt from GenCollectedHeap::mem_allocate_work — the
// enclosing retry loop and the declarations of 'result', 'try_count' and
// 'gc_count_before' lie outside this excerpt.
 314       // Read the gc count while the heap lock is held.
 315       gc_count_before = total_collections();
 316     }
 317 
    // Hand the collection-then-allocate request to the VM thread.
 318     VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
 319     VMThread::execute(&op);
 320     if (op.prologue_succeeded()) {
 321       result = op.result();
 322       if (op.gc_locked()) {
 323          assert(result == NULL, "must be NULL if gc_locked() is true")
 324          continue;  // Retry and/or stall as necessary.
 325       }
 326 
 327       // Allocation has failed and a collection
 328       // has been done.  If the gc time limit was exceeded
 329       // this time, return NULL so that an out-of-memory
 330       // will be thrown.  Clear gc_overhead_limit_exceeded
 331       // so that the overhead exceeded does not persist.
 332 
 333       const bool limit_exceeded = gen_policy()->size_policy()->gc_overhead_limit_exceeded();
 334       const bool softrefs_clear = gen_policy()->all_soft_refs_clear();
 335 
 336       if (limit_exceeded && softrefs_clear) {
 337         *gc_overhead_limit_was_exceeded = true;
 338         gen_policy()->size_policy()->set_gc_overhead_limit_exceeded(false);
          // Any space the op did obtain is abandoned: fill it with a dummy
          // object so the heap stays parsable.
 339         if (op.result() != NULL) {
 340           CollectedHeap::fill_with_object(op.result(), size);
 341         }
 342         return NULL;
 343       }
 344       assert(result == NULL || is_in_reserved(result),
 345              "result not in heap");
 346       return result;
 347     }
 348 
 349     // Give a warning if we seem to be looping forever.
 350     if ((QueuedAllocationWarningCount > 0) &&
 351         (try_count % QueuedAllocationWarningCount == 0)) {
 352           log_warning(gc, ergo)("GenCollectedHeap::mem_allocate_work retries %d times,"
 353                                 " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
 354     }


// NOTE(review): head of GenCollectedHeap::do_collection — the signature's
// leading parameters and the remainder of the body lie outside this excerpt.
 505                                      bool           is_tlab,
 506                                      GenerationType max_generation) {
 507   ResourceMark rm;
 508   DEBUG_ONLY(Thread* my_thread = Thread::current();)
 509 
  // Sanity checks: only the VM thread or a concurrent GC thread may collect,
  // at a safepoint, while holding the Heap_lock, and never reentrantly.
 510   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
 511   assert(my_thread->is_VM_thread() ||
 512          my_thread->is_ConcurrentGC_thread(),
 513          "incorrect thread type capability");
 514   assert(Heap_lock->is_locked(),
 515          "the requesting thread should have the Heap_lock");
 516   guarantee(!is_gc_active(), "collection is not reentrant");
 517 
 518   if (GCLocker::check_active_before_gc()) {
 519     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
 520   }
 521 
 522   GCIdMark gc_id_mark;
 523 
 524   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
 525                           collector_policy()->should_clear_all_soft_refs();
 526 
 527   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
 528 
 529   const size_t metadata_prev_used = MetaspaceAux::used_bytes();
 530 
 531   print_heap_before_gc();
 532 
 533   {
 534     FlagSetting fl(_is_gc_active, true);
 535 
    // "complete" = a full collection whose scope reaches the old generation;
    // in that case the old gen may collect the young gen itself unless
    // ScavengeBeforeFullGC requests a preceding young collection.
 536     bool complete = full && (max_generation == OldGen);
 537     bool old_collects_young = complete && !ScavengeBeforeFullGC;
 538     bool do_young_collection = !old_collects_young && _young_gen->should_collect(full, size, is_tlab);
 539 
 540     FormatBuffer<> gc_string("%s", "Pause ");
 541     if (do_young_collection) {
 542       gc_string.append("Young");
 543     } else {
 544       gc_string.append("Full");
 545     }
 546 
 547     GCTraceCPUTime tcpu;


// NOTE(review): tail of a last-ditch allocation path (presumably
// satisfy_failed_allocation) — the earlier, cheaper attempts lie outside
// this excerpt.
 703   // we can to reclaim memory. Force collection of soft references. Force
 704   // a complete compaction of the heap. Any additional methods for finding
 705   // free memory should be here, especially if they are expensive. If this
 706   // attempt fails, an OOM exception will be thrown.
 707   {
 708     UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted
 709 
 710     do_collection(true,                      // full
 711                   true,                      // clear_all_soft_refs
 712                   size,                      // size
 713                   is_tlab,                   // is_tlab
 714                   GenCollectedHeap::OldGen); // max_generation
 715   }
 716 
  // One final allocation attempt after the maximally-aggressive collection.
 717   result = attempt_allocation(size, is_tlab, false /* first_only */);
 718   if (result != NULL) {
 719     assert(is_in_reserved(result), "result not in heap");
 720     return result;
 721   }
 722 
 723   assert(!gen_policy()->should_clear_all_soft_refs(),
 724     "Flag should have been handled and cleared prior to this point");
 725 
 726   // What else?  We might try synchronous finalization later.  If the total
 727   // space available is large enough for the allocation, then a more
 728   // complete compaction phase than we've tried so far might be
 729   // appropriate.
 730   return NULL;
 731 }
 732 
 733 #ifdef ASSERT
 734 class AssertNonScavengableClosure: public OopClosure {
 735 public:
 736   virtual void do_oop(oop* p) {
 737     assert(!GenCollectedHeap::heap()->is_in_partial_collection(*p),
 738       "Referent should not be scavengable.");  }
 739   virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
 740 };
 741 static AssertNonScavengableClosure assert_is_non_scavengable_closure;
 742 #endif
 743 




// NOTE(review): tail of the heap-reservation routine — the enclosing
// function's signature and earlier body lie outside this excerpt.
 133 
  // Log the chosen page sizing and the reserved [base, base+size) range
  // for the "Heap" area.
 134   os::trace_page_sizes("Heap",
 135                        collector_policy()->min_heap_byte_size(),
 136                        total_reserved,
 137                        alignment,
 138                        heap_rs->base(),
 139                        heap_rs->size());
 140 
  // Hand the base of the reserved space back to the caller.
 141   return heap_rs->base();
 142 }
 143 
 144 void GenCollectedHeap::post_initialize() {
 145   CollectedHeap::post_initialize();
 146   ref_processing_init();
 147   check_gen_kinds();
 148   DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;
 149 
 150   _gen_policy->initialize_size_policy(def_new_gen->eden()->capacity(),
 151                                       _old_gen->capacity(),
 152                                       def_new_gen->from()->capacity());
 153 
 154   // Connect the two policies.
 155   _soft_ref_gen_policy.set_size_policy(_gen_policy->size_policy());
 156 
 157   _gen_policy->initialize_gc_policy_counters();
 158 }
 159 
 160 void GenCollectedHeap::ref_processing_init() {
 161   _young_gen->ref_processor_init();
 162   _old_gen->ref_processor_init();
 163 }
 164 
 165 size_t GenCollectedHeap::capacity() const {
 166   return _young_gen->capacity() + _old_gen->capacity();
 167 }
 168 
 169 size_t GenCollectedHeap::used() const {
 170   return _young_gen->used() + _old_gen->used();
 171 }
 172 
 173 void GenCollectedHeap::save_used_regions() {
 174   _old_gen->save_used_region();
 175   _young_gen->save_used_region();
 176 }


// NOTE(review): excerpt from GenCollectedHeap::mem_allocate_work — the
// enclosing retry loop and the declarations of 'result', 'try_count' and
// 'gc_count_before' lie outside this excerpt.
 318       // Read the gc count while the heap lock is held.
 319       gc_count_before = total_collections();
 320     }
 321 
    // Hand the collection-then-allocate request to the VM thread.
 322     VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
 323     VMThread::execute(&op);
 324     if (op.prologue_succeeded()) {
 325       result = op.result();
 326       if (op.gc_locked()) {
 327          assert(result == NULL, "must be NULL if gc_locked() is true");
 328          continue;  // Retry and/or stall as necessary.
 329       }
 330 
 331       // Allocation has failed and a collection
 332       // has been done.  If the gc time limit was exceeded
 333       // this time, return NULL so that an out-of-memory
 334       // will be thrown.  Clear gc_overhead_limit_exceeded
 335       // so that the overhead exceeded does not persist.
 336 
 337       const bool limit_exceeded = gen_policy()->size_policy()->gc_overhead_limit_exceeded();
 338       const bool softrefs_clear = soft_ref_policy()->all_soft_refs_clear();
 339 
 340       if (limit_exceeded && softrefs_clear) {
 341         *gc_overhead_limit_was_exceeded = true;
 342         gen_policy()->size_policy()->set_gc_overhead_limit_exceeded(false);
          // Any space the op did obtain is abandoned: fill it with a dummy
          // object so the heap stays parsable.
 343         if (op.result() != NULL) {
 344           CollectedHeap::fill_with_object(op.result(), size);
 345         }
 346         return NULL;
 347       }
 348       assert(result == NULL || is_in_reserved(result),
 349              "result not in heap");
 350       return result;
 351     }
 352 
 353     // Give a warning if we seem to be looping forever.
 354     if ((QueuedAllocationWarningCount > 0) &&
 355         (try_count % QueuedAllocationWarningCount == 0)) {
 356           log_warning(gc, ergo)("GenCollectedHeap::mem_allocate_work retries %d times,"
 357                                 " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
 358     }


// NOTE(review): head of GenCollectedHeap::do_collection — the signature's
// leading parameters and the remainder of the body lie outside this excerpt.
 509                                      bool           is_tlab,
 510                                      GenerationType max_generation) {
 511   ResourceMark rm;
 512   DEBUG_ONLY(Thread* my_thread = Thread::current();)
 513 
  // Sanity checks: only the VM thread or a concurrent GC thread may collect,
  // at a safepoint, while holding the Heap_lock, and never reentrantly.
 514   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
 515   assert(my_thread->is_VM_thread() ||
 516          my_thread->is_ConcurrentGC_thread(),
 517          "incorrect thread type capability");
 518   assert(Heap_lock->is_locked(),
 519          "the requesting thread should have the Heap_lock");
 520   guarantee(!is_gc_active(), "collection is not reentrant");
 521 
 522   if (GCLocker::check_active_before_gc()) {
 523     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
 524   }
 525 
 526   GCIdMark gc_id_mark;
 527 
 528   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
 529                           soft_ref_policy()->should_clear_all_soft_refs();
 530 
 531   ClearedAllSoftRefs casr(do_clear_all_soft_refs, soft_ref_policy());
 532 
 533   const size_t metadata_prev_used = MetaspaceAux::used_bytes();
 534 
 535   print_heap_before_gc();
 536 
 537   {
 538     FlagSetting fl(_is_gc_active, true);
 539 
    // "complete" = a full collection whose scope reaches the old generation;
    // in that case the old gen may collect the young gen itself unless
    // ScavengeBeforeFullGC requests a preceding young collection.
 540     bool complete = full && (max_generation == OldGen);
 541     bool old_collects_young = complete && !ScavengeBeforeFullGC;
 542     bool do_young_collection = !old_collects_young && _young_gen->should_collect(full, size, is_tlab);
 543 
 544     FormatBuffer<> gc_string("%s", "Pause ");
 545     if (do_young_collection) {
 546       gc_string.append("Young");
 547     } else {
 548       gc_string.append("Full");
 549     }
 550 
 551     GCTraceCPUTime tcpu;


// NOTE(review): tail of a last-ditch allocation path (presumably
// satisfy_failed_allocation) — the earlier, cheaper attempts lie outside
// this excerpt.
 707   // we can to reclaim memory. Force collection of soft references. Force
 708   // a complete compaction of the heap. Any additional methods for finding
 709   // free memory should be here, especially if they are expensive. If this
 710   // attempt fails, an OOM exception will be thrown.
 711   {
 712     UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted
 713 
 714     do_collection(true,                      // full
 715                   true,                      // clear_all_soft_refs
 716                   size,                      // size
 717                   is_tlab,                   // is_tlab
 718                   GenCollectedHeap::OldGen); // max_generation
 719   }
 720 
  // One final allocation attempt after the maximally-aggressive collection.
 721   result = attempt_allocation(size, is_tlab, false /* first_only */);
 722   if (result != NULL) {
 723     assert(is_in_reserved(result), "result not in heap");
 724     return result;
 725   }
 726 
 727   assert(!soft_ref_policy()->should_clear_all_soft_refs(),
 728     "Flag should have been handled and cleared prior to this point");
 729 
 730   // What else?  We might try synchronous finalization later.  If the total
 731   // space available is large enough for the allocation, then a more
 732   // complete compaction phase than we've tried so far might be
 733   // appropriate.
 734   return NULL;
 735 }
 736 
 737 #ifdef ASSERT
 738 class AssertNonScavengableClosure: public OopClosure {
 739 public:
 740   virtual void do_oop(oop* p) {
 741     assert(!GenCollectedHeap::heap()->is_in_partial_collection(*p),
 742       "Referent should not be scavengable.");  }
 743   virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
 744 };
 745 static AssertNonScavengableClosure assert_is_non_scavengable_closure;
 746 #endif
 747 


< prev index next >