< prev index next >

src/hotspot/share/gc/g1/g1VMOperations.cpp

Print this page
rev 60059 : imported patch 8210462-fix-remaining-mentions-of-initial-mark


  40   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  41   GCCauseSetter x(g1h, _gc_cause);
  42   _gc_succeeded = g1h->do_full_collection(true /* explicit_gc */, false /* clear_all_soft_refs */);
  43 }
  44 
// Construct the operation that tries to start a concurrent mark cycle.
// All outcome flags start false; doit_prologue()/doit() set them so the
// requesting thread can inspect the result after the safepoint.
  45 VM_G1TryInitiateConcMark::VM_G1TryInitiateConcMark(uint gc_count_before,
  46                                                    GCCause::Cause gc_cause,
  47                                                    double target_pause_time_ms) :
  48   VM_GC_Operation(gc_count_before, gc_cause),
  49   _target_pause_time_ms(target_pause_time_ms),
  50   _transient_failure(false),           // set if the request should be retried
  51   _cycle_already_in_progress(false),   // set if marking is already running
  52   _whitebox_attached(false),           // set if WhiteBox controls concurrent cycles
  53   _terminating(false),                 // set if the marking thread is shutting down
  54   _gc_succeeded(false)                 // set if the pause actually ran
  55 {}
  56 
// Runs in the requesting thread before the safepoint.  Returns whether the
// operation should proceed; on failure records a transient failure so the
// caller knows to retry.
  57 bool VM_G1TryInitiateConcMark::doit_prologue() {
  58   bool result = VM_GC_Operation::doit_prologue();
  59   // The prologue can fail for a couple of reasons. The first is that another GC
  60   // got scheduled and prevented the scheduling of the initial mark GC. The
  61   // second is that the GC locker may be active and the heap can't be expanded.
  62   // In both cases we want to retry the GC so that the initial mark pause is
  63   // actually scheduled. In the second case, however, we should stall
  64   // until the GC locker is no longer active and then retry the initial mark GC.
  65   if (!result) _transient_failure = true;
  66   return result;
  67 }
  68 
// Executed at a safepoint.  Attempts to schedule an initial mark pause,
// recording exactly one outcome in the flags inspected by the caller:
// _terminating, _cycle_already_in_progress, _whitebox_attached,
// _gc_succeeded, or _transient_failure.
  69 void VM_G1TryInitiateConcMark::doit() {
  70   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  71 
  72   GCCauseSetter x(g1h, _gc_cause);
  73 
  74   // Record for handling by caller.
  75   _terminating = g1h->_cm_thread->should_terminate();
  76 
  77   if (_terminating && GCCause::is_user_requested_gc(_gc_cause)) {
  78     // When terminating, the request to initiate a concurrent cycle will be
  79     // ignored by do_collection_pause_at_safepoint; instead it will just do
  80     // a young-only or mixed GC (depending on phase).  For a user request
  81     // there's no point in even doing that much, so done.  For some non-user
  82     // requests the alternative GC might still be needed.
  83   } else if (!g1h->policy()->force_initial_mark_if_outside_cycle(_gc_cause)) {
  84     // Failure to force the next GC pause to be an initial mark indicates
  85     // there is already a concurrent marking cycle in progress.  Set flag
  86     // to notify the caller and return immediately.
  87     _cycle_already_in_progress = true;
  88   } else if ((_gc_cause != GCCause::_wb_breakpoint) &&
  89              ConcurrentGCBreakpoints::is_controlled()) {
  90     // WhiteBox wants to be in control of concurrent cycles, so don't try to
  91     // start one.  This check is after the force_initial_mark_xxx so that a
  92     // request will be remembered for a later partial collection, even though
  93     // we've rejected this request.
  94     _whitebox_attached = true;
  95   } else if (g1h->do_collection_pause_at_safepoint(_target_pause_time_ms)) {
  96     _gc_succeeded = true;
  97   } else {
  98     // Failure to perform the collection at all occurs because GCLocker is
  99     // active, and we have the bad luck to be the collection request that
 100     // makes a later _gc_locker collection needed.  (Else we would have hit
 101     // the GCLocker check in the prologue.)
 102     _transient_failure = true;
 103   }
 104 }
 105 
 106 VM_G1CollectForAllocation::VM_G1CollectForAllocation(size_t         word_size,
 107                                                      uint           gc_count_before,
 108                                                      GCCause::Cause gc_cause,
 109                                                      double         target_pause_time_ms) :
 110   VM_CollectForAllocation(word_size, gc_count_before, gc_cause),
 111   _gc_succeeded(false),




  40   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  41   GCCauseSetter x(g1h, _gc_cause);
  42   _gc_succeeded = g1h->do_full_collection(true /* explicit_gc */, false /* clear_all_soft_refs */);
  43 }
  44 
// Construct the operation that tries to start a concurrent mark cycle.
// All outcome flags start false; doit_prologue()/doit() set them so the
// requesting thread can inspect the result after the safepoint.
  45 VM_G1TryInitiateConcMark::VM_G1TryInitiateConcMark(uint gc_count_before,
  46                                                    GCCause::Cause gc_cause,
  47                                                    double target_pause_time_ms) :
  48   VM_GC_Operation(gc_count_before, gc_cause),
  49   _target_pause_time_ms(target_pause_time_ms),
  50   _transient_failure(false),           // set if the request should be retried
  51   _cycle_already_in_progress(false),   // set if marking is already running
  52   _whitebox_attached(false),           // set if WhiteBox controls concurrent cycles
  53   _terminating(false),                 // set if the marking thread is shutting down
  54   _gc_succeeded(false)                 // set if the pause actually ran
  55 {}
  56 
// Runs in the requesting thread before the safepoint.  Returns whether the
// operation should proceed; on failure records a transient failure so the
// caller knows to retry.
  57 bool VM_G1TryInitiateConcMark::doit_prologue() {
  58   bool result = VM_GC_Operation::doit_prologue();
  59   // The prologue can fail for a couple of reasons. The first is that another GC
  60   // got scheduled and prevented the scheduling of the concurrent start GC. The
  61   // second is that the GC locker may be active and the heap can't be expanded.
  62   // In both cases we want to retry the GC so that the concurrent start pause is
  63   // actually scheduled. In the second case, however, we should stall
  64   // until the GC locker is no longer active and then retry the concurrent start GC.
  65   if (!result) _transient_failure = true;
  66   return result;
  67 }
  68 
// Executed at a safepoint.  Attempts to schedule a concurrent start pause,
// recording exactly one outcome in the flags inspected by the caller:
// _terminating, _cycle_already_in_progress, _whitebox_attached,
// _gc_succeeded, or _transient_failure.
  69 void VM_G1TryInitiateConcMark::doit() {
  70   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  71 
  72   GCCauseSetter x(g1h, _gc_cause);
  73 
  74   // Record for handling by caller.
  75   _terminating = g1h->_cm_thread->should_terminate();
  76 
  77   if (_terminating && GCCause::is_user_requested_gc(_gc_cause)) {
  78     // When terminating, the request to initiate a concurrent cycle will be
  79     // ignored by do_collection_pause_at_safepoint; instead it will just do
  80     // a young-only or mixed GC (depending on phase).  For a user request
  81     // there's no point in even doing that much, so done.  For some non-user
  82     // requests the alternative GC might still be needed.
  83   } else if (!g1h->policy()->force_concurrent_start_if_outside_cycle(_gc_cause)) {
  84     // Failure to force the next GC pause to be a concurrent start indicates
  85     // there is already a concurrent marking cycle in progress.  Set flag
  86     // to notify the caller and return immediately.
  87     _cycle_already_in_progress = true;
  88   } else if ((_gc_cause != GCCause::_wb_breakpoint) &&
  89              ConcurrentGCBreakpoints::is_controlled()) {
  90     // WhiteBox wants to be in control of concurrent cycles, so don't try to
  91     // start one.  This check is after the force_concurrent_start_xxx so that a
  92     // request will be remembered for a later partial collection, even though
  93     // we've rejected this request.
  94     _whitebox_attached = true;
  95   } else if (g1h->do_collection_pause_at_safepoint(_target_pause_time_ms)) {
  96     _gc_succeeded = true;
  97   } else {
  98     // Failure to perform the collection at all occurs because GCLocker is
  99     // active, and we have the bad luck to be the collection request that
 100     // makes a later _gc_locker collection needed.  (Else we would have hit
 101     // the GCLocker check in the prologue.)
 102     _transient_failure = true;
 103   }
 104 }
 105 
 106 VM_G1CollectForAllocation::VM_G1CollectForAllocation(size_t         word_size,
 107                                                      uint           gc_count_before,
 108                                                      GCCause::Cause gc_cause,
 109                                                      double         target_pause_time_ms) :
 110   VM_CollectForAllocation(word_size, gc_count_before, gc_cause),
 111   _gc_succeeded(false),


< prev index next >