
src/hotspot/share/gc/shared/collectedHeap.cpp

rev 47862 : imported patch 10.07.open.rebase_20171110.dcubed


  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/systemDictionary.hpp"
  27 #include "gc/shared/allocTracer.hpp"
  28 #include "gc/shared/barrierSet.inline.hpp"
  29 #include "gc/shared/collectedHeap.hpp"
  30 #include "gc/shared/collectedHeap.inline.hpp"
  31 #include "gc/shared/gcHeapSummary.hpp"
  32 #include "gc/shared/gcTrace.hpp"
  33 #include "gc/shared/gcTraceTime.inline.hpp"
  34 #include "gc/shared/gcWhen.hpp"
  35 #include "gc/shared/vmGCOperations.hpp"
  36 #include "logging/log.hpp"
  37 #include "memory/metaspace.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "oops/instanceMirrorKlass.hpp"
  40 #include "oops/oop.inline.hpp"
  41 #include "runtime/init.hpp"
  42 #include "runtime/thread.inline.hpp"

  43 #include "services/heapDumper.hpp"
  44 #include "utilities/align.hpp"
  45 
  46 
  47 #ifdef ASSERT
  48 int CollectedHeap::_fire_out_of_memory_count = 0;
  49 #endif
  50 
  51 size_t CollectedHeap::_filler_array_max_size = 0;
  52 
  53 template <>
  54 void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  55   st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  56   st->print_raw(m);
  57 }
  58 
  59 void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
  60   if (!should_log()) {
  61     return;
  62   }


 523 }
 524 
 525 void CollectedHeap::ensure_parsability(bool retire_tlabs) {
 526   // The second disjunct in the assertion below makes a concession
 527   // for the start-up verification done while the VM is being
 528   // created. Callers be careful that you know that mutators
 529   // aren't going to interfere -- for instance, this is permissible
 530   // if we are still single-threaded and have either not yet
 531   // started allocating (nothing much to verify) or we have
 532   // started allocating but are now a full-fledged JavaThread
 533   // (and have thus made our TLABs available for filling).
 534   assert(SafepointSynchronize::is_at_safepoint() ||
 535          !is_init_completed(),
 536          "Should only be called at a safepoint or at start-up"
 537          " otherwise concurrent mutator activity may make heap "
 538          " unparsable again");
 539   const bool use_tlab = UseTLAB;
 540   const bool deferred = _defer_initial_card_mark;
 541   // The main thread starts allocating via a TLAB even before it
 542   // has added itself to the threads list at vm boot-up.
 543   assert(!use_tlab || Threads::first() != NULL,

 544          "Attempt to fill tlabs before main thread has been added"
 545          " to threads list is doomed to failure!");
 546   for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
 547      if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
 548 #if COMPILER2_OR_JVMCI
 549      // The deferred store barriers must all have been flushed to the
 550      // card-table (or other remembered set structure) before GC starts
 551      // processing the card-table (or other remembered set).
 552      if (deferred) flush_deferred_store_barrier(thread);
 553 #else
 554      assert(!deferred, "Should be false");
 555      assert(thread->deferred_card_mark().is_empty(), "Should be empty");
 556 #endif
 557   }
 558 }
 559 
 560 void CollectedHeap::accumulate_statistics_all_tlabs() {
 561   if (UseTLAB) {
 562     assert(SafepointSynchronize::is_at_safepoint() ||
 563          !is_init_completed(),
 564          "should only accumulate statistics on tlabs at safepoint");
 565 
 566     ThreadLocalAllocBuffer::accumulate_statistics_before_gc();

(old version of the file ends here; the new version, with rev 47862 applied, follows)

  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/systemDictionary.hpp"
  27 #include "gc/shared/allocTracer.hpp"
  28 #include "gc/shared/barrierSet.inline.hpp"
  29 #include "gc/shared/collectedHeap.hpp"
  30 #include "gc/shared/collectedHeap.inline.hpp"
  31 #include "gc/shared/gcHeapSummary.hpp"
  32 #include "gc/shared/gcTrace.hpp"
  33 #include "gc/shared/gcTraceTime.inline.hpp"
  34 #include "gc/shared/gcWhen.hpp"
  35 #include "gc/shared/vmGCOperations.hpp"
  36 #include "logging/log.hpp"
  37 #include "memory/metaspace.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "oops/instanceMirrorKlass.hpp"
  40 #include "oops/oop.inline.hpp"
  41 #include "runtime/init.hpp"
  42 #include "runtime/thread.inline.hpp"
  43 #include "runtime/threadSMR.hpp"
  44 #include "services/heapDumper.hpp"
  45 #include "utilities/align.hpp"
  46 
  47 
  48 #ifdef ASSERT
  49 int CollectedHeap::_fire_out_of_memory_count = 0;
  50 #endif
  51 
  52 size_t CollectedHeap::_filler_array_max_size = 0;
  53 
  54 template <>
  55 void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  56   st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  57   st->print_raw(m);
  58 }
  59 
  60 void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
  61   if (!should_log()) {
  62     return;
  63   }


 524 }
 525 
 526 void CollectedHeap::ensure_parsability(bool retire_tlabs) {
 527   // The second disjunct in the assertion below makes a concession
 528   // for the start-up verification done while the VM is being
 529   // created. Callers be careful that you know that mutators
 530   // aren't going to interfere -- for instance, this is permissible
 531   // if we are still single-threaded and have either not yet
 532   // started allocating (nothing much to verify) or we have
 533   // started allocating but are now a full-fledged JavaThread
 534   // (and have thus made our TLABs available for filling).
 535   assert(SafepointSynchronize::is_at_safepoint() ||
 536          !is_init_completed(),
 537          "Should only be called at a safepoint or at start-up"
 538          " otherwise concurrent mutator activity may make heap "
 539          " unparsable again");
 540   const bool use_tlab = UseTLAB;
 541   const bool deferred = _defer_initial_card_mark;
 542   // The main thread starts allocating via a TLAB even before it
 543   // has added itself to the threads list at vm boot-up.
 544   JavaThreadIteratorWithHandle jtiwh;
 545   assert(!use_tlab || jtiwh.length() > 0,
 546          "Attempt to fill tlabs before main thread has been added"
 547          " to threads list is doomed to failure!");
 548   for (; JavaThread *thread = jtiwh.next(); ) {
 549      if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
 550 #if COMPILER2_OR_JVMCI
 551      // The deferred store barriers must all have been flushed to the
 552      // card-table (or other remembered set structure) before GC starts
 553      // processing the card-table (or other remembered set).
 554      if (deferred) flush_deferred_store_barrier(thread);
 555 #else
 556      assert(!deferred, "Should be false");
 557      assert(thread->deferred_card_mark().is_empty(), "Should be empty");
 558 #endif
 559   }
 560 }
 561 
 562 void CollectedHeap::accumulate_statistics_all_tlabs() {
 563   if (UseTLAB) {
 564     assert(SafepointSynchronize::is_at_safepoint() ||
 565          !is_init_completed(),
 566          "should only accumulate statistics on tlabs at safepoint");
 567 
 568     ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
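
The patch shown above replaces the Threads::first()/next() walk in ensure_parsability() with the Thread-SMR JavaThreadIteratorWithHandle from runtime/threadSMR.hpp, and replaces the Threads::first() != NULL sanity check with jtiwh.length() > 0. A minimal sketch of that iteration idiom follows, assuming only the constructor, length(), and next() calls visible in this patch; the helper function name and the TLAB check are hypothetical, for illustration only.

    #include "runtime/thread.inline.hpp"
    #include "runtime/threadSMR.hpp"

    // Sketch only: not part of the patch. Illustrates the
    // JavaThreadIteratorWithHandle idiom used by ensure_parsability() above.
    static size_t count_threads_with_active_tlabs() {
      size_t n = 0;
      // The iterator holds a protected snapshot of the threads list for its
      // own lifetime, so the walk needs no explicit Threads_lock.
      for (JavaThreadIteratorWithHandle jtiwh; JavaThread* t = jtiwh.next(); ) {
        if (t->tlab().start() != NULL) {  // hypothetical "has a TLAB" check
          n++;
        }
      }
      return n;
    }

Because the iterator protects its snapshot of the threads list for the duration of the loop, the walk stays safe even if threads exit concurrently; ensure_parsability() itself runs at a safepoint (or at start-up), so it does not strictly need that property, but using the common idiom keeps thread iteration uniform across the VM.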

