src/hotspot/share/gc/shared/vmGCOperations.cpp

  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/classLoader.hpp"
  27 #include "classfile/javaClasses.hpp"
  28 #include "gc/shared/allocTracer.hpp"
  29 #include "gc/shared/gcId.hpp"
  30 #include "gc/shared/gcLocker.hpp"
  31 #include "gc/shared/genCollectedHeap.hpp"
  32 #include "gc/shared/vmGCOperations.hpp"
  33 #include "interpreter/oopMapCache.hpp"
  34 #include "logging/log.hpp"
  35 #include "memory/oopFactory.hpp"
  36 #include "runtime/handles.inline.hpp"
  37 #include "runtime/init.hpp"
  38 #include "utilities/dtrace.hpp"
  39 #include "utilities/macros.hpp"
  40 #include "utilities/preserveException.hpp"
  41 #if INCLUDE_ALL_GCS
  42 #include "gc/g1/g1CollectedHeap.inline.hpp"
  43 #include "gc/g1/g1Policy.hpp"
  44 #endif // INCLUDE_ALL_GCS
  45 
  46 VM_GC_Operation::~VM_GC_Operation() {
  47   CollectedHeap* ch = Universe::heap();
  48   ch->soft_ref_policy()->set_all_soft_refs_clear(false);
  49 }
  50 
  51 // The same dtrace probe can't be inserted in two different files, so we
  52 // have to call it here so that it exists in only one file.  New probes can't
  53 // be created for the other file anymore; the dtrace probes have to remain stable.
  54 void VM_GC_Operation::notify_gc_begin(bool full) {
  55   HOTSPOT_GC_BEGIN(
  56                    full);
  57   HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
  58 }
  59 
  60 void VM_GC_Operation::notify_gc_end() {
  61   HOTSPOT_GC_END();
  62   HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
  63 }
  64 


 176 
 177   GenCollectedHeap* gch = GenCollectedHeap::heap();
 178   GCCauseSetter gccs(gch, _gc_cause);
 179   gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_generation);
 180 }
 181 
 182 VM_CollectForMetadataAllocation::VM_CollectForMetadataAllocation(ClassLoaderData* loader_data,
 183                                                                  size_t size,
 184                                                                  Metaspace::MetadataType mdtype,
 185                                                                  uint gc_count_before,
 186                                                                  uint full_gc_count_before,
 187                                                                  GCCause::Cause gc_cause)
 188     : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
 189       _loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL) {
 190   assert(_size != 0, "An allocation should always be requested with this operation.");
 191   AllocTracer::send_allocation_requiring_gc_event(_size * HeapWordSize, GCId::peek());
 192 }
 193 
 194 // Returns true iff concurrent GCs unload metadata.
 195 bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
 196 #if INCLUDE_ALL_GCS
 197   if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) {
 198     MetaspaceGC::set_should_concurrent_collect(true);
 199     return true;
 200   }
 201 
 202   if (UseG1GC && ClassUnloadingWithConcurrentMark) {
 203     G1CollectedHeap* g1h = G1CollectedHeap::heap();
 204     g1h->g1_policy()->collector_state()->set_initiate_conc_mark_if_possible(true);
 205 
 206     GCCauseSetter x(g1h, _gc_cause);
 207 
 208     // At this point we are supposed to start a concurrent cycle. We
 209     // will do so if one is not already in progress.
 210     bool should_start = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);
 211 
 212     if (should_start) {
 213       double pause_target = g1h->g1_policy()->max_pause_time_ms();
 214       g1h->do_collection_pause_at_safepoint(pause_target);
 215     }
 216     return true;
 217   }
 218 #endif
 219 
 220   return false;
 221 }




  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/classLoader.hpp"
  27 #include "classfile/javaClasses.hpp"
  28 #include "gc/shared/allocTracer.hpp"
  29 #include "gc/shared/gcId.hpp"
  30 #include "gc/shared/gcLocker.hpp"
  31 #include "gc/shared/genCollectedHeap.hpp"
  32 #include "gc/shared/vmGCOperations.hpp"
  33 #include "interpreter/oopMapCache.hpp"
  34 #include "logging/log.hpp"
  35 #include "memory/oopFactory.hpp"
  36 #include "runtime/handles.inline.hpp"
  37 #include "runtime/init.hpp"
  38 #include "utilities/dtrace.hpp"
  39 #include "utilities/macros.hpp"
  40 #include "utilities/preserveException.hpp"
  41 #if INCLUDE_G1GC
  42 #include "gc/g1/g1CollectedHeap.inline.hpp"
  43 #include "gc/g1/g1Policy.hpp"
  44 #endif // INCLUDE_G1GC
  45 
  46 VM_GC_Operation::~VM_GC_Operation() {
  47   CollectedHeap* ch = Universe::heap();
  48   ch->soft_ref_policy()->set_all_soft_refs_clear(false);
  49 }
  50 
  51 // The same dtrace probe can't be inserted in two different files, so we
  52 // have to call it here so that it exists in only one file.  New probes can't
  53 // be created for the other file anymore; the dtrace probes have to remain stable.
  54 void VM_GC_Operation::notify_gc_begin(bool full) {
  55   HOTSPOT_GC_BEGIN(
  56                    full);
  57   HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
  58 }
  59 
  60 void VM_GC_Operation::notify_gc_end() {
  61   HOTSPOT_GC_END();
  62   HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
  63 }
  64 


 176 
 177   GenCollectedHeap* gch = GenCollectedHeap::heap();
 178   GCCauseSetter gccs(gch, _gc_cause);
 179   gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_generation);
 180 }
 181 
 182 VM_CollectForMetadataAllocation::VM_CollectForMetadataAllocation(ClassLoaderData* loader_data,
 183                                                                  size_t size,
 184                                                                  Metaspace::MetadataType mdtype,
 185                                                                  uint gc_count_before,
 186                                                                  uint full_gc_count_before,
 187                                                                  GCCause::Cause gc_cause)
 188     : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
 189       _loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL) {
 190   assert(_size != 0, "An allocation should always be requested with this operation.");
 191   AllocTracer::send_allocation_requiring_gc_event(_size * HeapWordSize, GCId::peek());
 192 }
 193 
 194 // Returns true iff concurrent GCs unload metadata.
 195 bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
 196 #if INCLUDE_CMSGC
 197   if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) {
 198     MetaspaceGC::set_should_concurrent_collect(true);
 199     return true;
 200   }
 201 #endif
 202 
 203 #if INCLUDE_G1GC
 204   if (UseG1GC && ClassUnloadingWithConcurrentMark) {
 205     G1CollectedHeap* g1h = G1CollectedHeap::heap();
 206     g1h->g1_policy()->collector_state()->set_initiate_conc_mark_if_possible(true);
 207 
 208     GCCauseSetter x(g1h, _gc_cause);
 209 
 210     // At this point we are supposed to start a concurrent cycle. We
 211     // will do so if one is not already in progress.
 212     bool should_start = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);
 213 
 214     if (should_start) {
 215       double pause_target = g1h->g1_policy()->max_pause_time_ms();
 216       g1h->do_collection_pause_at_safepoint(pause_target);
 217     }
 218     return true;
 219   }
 220 #endif
 221 
 222   return false;
 223 }

