
src/share/vm/gc/shared/collectedHeap.cpp

rev 13298 : 8184751: Provide thread pool for parallel safepoint cleanup


  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/systemDictionary.hpp"
  27 #include "gc/shared/allocTracer.hpp"
  28 #include "gc/shared/barrierSet.inline.hpp"
  29 #include "gc/shared/collectedHeap.hpp"
  30 #include "gc/shared/collectedHeap.inline.hpp"
  31 #include "gc/shared/gcHeapSummary.hpp"
  32 #include "gc/shared/gcTrace.hpp"
  33 #include "gc/shared/gcTraceTime.inline.hpp"
  34 #include "gc/shared/gcWhen.hpp"
  35 #include "gc/shared/vmGCOperations.hpp"
  36 #include "logging/log.hpp"
  37 #include "memory/metaspace.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "oops/instanceMirrorKlass.hpp"
  40 #include "oops/oop.inline.hpp"
  41 #include "runtime/init.hpp"
  42 #include "runtime/thread.inline.hpp"
  43 #include "services/heapDumper.hpp"
  44 #include "utilities/align.hpp"
  45 
  46 
  47 #ifdef ASSERT
  48 int CollectedHeap::_fire_out_of_memory_count = 0;
  49 #endif
  50 
  51 size_t CollectedHeap::_filler_array_max_size = 0;
  52 
  53 template <>
  54 void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  55   st->print_cr("GC heap %s", m.is_before ? "before" : "after");


 163 // default implementations, for collectors which don't support this
 164 // feature.
 165 bool CollectedHeap::supports_concurrent_phase_control() const {
 166   return false;
 167 }
 168 
 169 const char* const* CollectedHeap::concurrent_phases() const {
 170   static const char* const result[] = { NULL };
 171   return result;
 172 }
 173 
 174 bool CollectedHeap::request_concurrent_phase(const char* phase) {
 175   return false;
 176 }
 177 
 178 // Memory state functions.
 179 
 180 
 181 CollectedHeap::CollectedHeap() :
 182   _barrier_set(NULL),
 183   _is_gc_active(false),
 184   _total_collections(0),
 185   _total_full_collections(0),
 186   _gc_cause(GCCause::_no_gc),
 187   _gc_lastcause(GCCause::_no_gc),
 188   _defer_initial_card_mark(false) // strengthened by subclass in pre_initialize() below.
 189 {
 190   const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
 191   const size_t elements_per_word = HeapWordSize / sizeof(jint);
 192   _filler_array_max_size = align_object_size(filler_array_hdr_size() +
 193                                              max_len / elements_per_word);
 194 
 195   NOT_PRODUCT(_promotion_failure_alot_count = 0;)
 196   NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)
 197 
 198   if (UsePerfData) {
 199     EXCEPTION_MARK;
 200 
 201     // create the gc cause jvmstat counters
 202     _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
 203                              80, GCCause::to_string(_gc_cause), CHECK);
 204 
 205     _perf_gc_lastcause =
 206                 PerfDataManager::create_string_variable(SUN_GC, "lastCause",
 207                              80, GCCause::to_string(_gc_lastcause), CHECK);
 208   }
 209 
 210   // Create the ring log
 211   if (LogEvents) {
 212     _gc_heap_log = new GCHeapLog();
 213   } else {
 214     _gc_heap_log = NULL;
 215   }
 216 }
 217 
 218 // This interface assumes that it's being called by the
 219 // vm thread. It collects the heap assuming that the
 220 // heap lock is already held and that we are executing in
 221 // the context of the vm thread.
 222 void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
 223   assert(Thread::current()->is_VM_thread(), "Precondition#1");
 224   assert(Heap_lock->is_locked(), "Precondition#2");
 225   GCCauseSetter gcs(this, cause);
 226   switch (cause) {
 227     case GCCause::_heap_inspection:
 228     case GCCause::_heap_dump:
 229     case GCCause::_metadata_GC_threshold : {
 230       HandleMark hm;
 231       do_full_collection(false);        // don't clear all soft refs
 232       break;
 233     }
 234     case GCCause::_metadata_GC_clear_soft_refs: {




  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/systemDictionary.hpp"
  27 #include "gc/shared/allocTracer.hpp"
  28 #include "gc/shared/barrierSet.inline.hpp"
  29 #include "gc/shared/collectedHeap.hpp"
  30 #include "gc/shared/collectedHeap.inline.hpp"
  31 #include "gc/shared/gcHeapSummary.hpp"
  32 #include "gc/shared/gcTrace.hpp"
  33 #include "gc/shared/gcTraceTime.inline.hpp"
  34 #include "gc/shared/gcWhen.hpp"
  35 #include "gc/shared/vmGCOperations.hpp"
  36 #include "gc/shared/workgroup.hpp"
  37 #include "logging/log.hpp"
  38 #include "memory/metaspace.hpp"
  39 #include "memory/resourceArea.hpp"
  40 #include "oops/instanceMirrorKlass.hpp"
  41 #include "oops/oop.inline.hpp"
  42 #include "runtime/init.hpp"
  43 #include "runtime/thread.inline.hpp"
  44 #include "services/heapDumper.hpp"
  45 #include "utilities/align.hpp"
  46 
  47 
  48 #ifdef ASSERT
  49 int CollectedHeap::_fire_out_of_memory_count = 0;
  50 #endif
  51 
  52 size_t CollectedHeap::_filler_array_max_size = 0;
  53 
  54 template <>
  55 void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  56   st->print_cr("GC heap %s", m.is_before ? "before" : "after");


 164 // default implementations, for collectors which don't support this
 165 // feature.
 166 bool CollectedHeap::supports_concurrent_phase_control() const {
 167   return false;
 168 }
 169 
 170 const char* const* CollectedHeap::concurrent_phases() const {
 171   static const char* const result[] = { NULL };
 172   return result;
 173 }
 174 
 175 bool CollectedHeap::request_concurrent_phase(const char* phase) {
 176   return false;
 177 }
 178 
 179 // Memory state functions.
 180 
 181 
 182 CollectedHeap::CollectedHeap() :
 183   _barrier_set(NULL),
 184   _safepoint_workers(NULL),
 185   _is_gc_active(false),
 186   _total_collections(0),
 187   _total_full_collections(0),
 188   _gc_cause(GCCause::_no_gc),
 189   _gc_lastcause(GCCause::_no_gc),
 190   _defer_initial_card_mark(false) // strengthened by subclass in pre_initialize() below.
 191 {
 192   const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
 193   const size_t elements_per_word = HeapWordSize / sizeof(jint);
 194   _filler_array_max_size = align_object_size(filler_array_hdr_size() +
 195                                              max_len / elements_per_word);
 196 
 197   NOT_PRODUCT(_promotion_failure_alot_count = 0;)
 198   NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)
 199 
 200   if (UsePerfData) {
 201     EXCEPTION_MARK;
 202 
 203     // create the gc cause jvmstat counters
 204     _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
 205                              80, GCCause::to_string(_gc_cause), CHECK);
 206 
 207     _perf_gc_lastcause =
 208                 PerfDataManager::create_string_variable(SUN_GC, "lastCause",
 209                              80, GCCause::to_string(_gc_lastcause), CHECK);
 210   }
 211 
 212   // Create the ring log
 213   if (LogEvents) {
 214     _gc_heap_log = new GCHeapLog();
 215   } else {
 216     _gc_heap_log = NULL;
 217   }
 218 
 219   if (ParallelSafepointCleanupThreads > 1) {
 220     _safepoint_workers = new WorkGang("Safepoint Cleanup Thread", ParallelSafepointCleanupThreads,
 221                                       /* are_GC_task_threads */ false,
 222                                       /* are_ConcurrentGC_threads */ false);
 223     _safepoint_workers->initialize_workers();
 224   }
 225 }
 226 
 227 // This interface assumes that it's being called by the
 228 // vm thread. It collects the heap assuming that the
 229 // heap lock is already held and that we are executing in
 230 // the context of the vm thread.
 231 void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
 232   assert(Thread::current()->is_VM_thread(), "Precondition#1");
 233   assert(Heap_lock->is_locked(), "Precondition#2");
 234   GCCauseSetter gcs(this, cause);
 235   switch (cause) {
 236     case GCCause::_heap_inspection:
 237     case GCCause::_heap_dump:
 238     case GCCause::_metadata_GC_threshold : {
 239       HandleMark hm;
 240       do_full_collection(false);        // don't clear all soft refs
 241       break;
 242     }
 243     case GCCause::_metadata_GC_clear_soft_refs: {

