121 // will cause the requesting thread to spin inside collect() until the
122 // just started marking cycle is complete - which may be a while. So
123 // we do NOT retry the GC.
124 if (!res) {
125 assert(_word_size == 0, "Concurrent Full GC/Humongous Object IM shouldn't be allocating");
126 if (_gc_cause != GCCause::_g1_humongous_allocation) {
127 _should_retry_gc = true;
128 }
129 return;
130 }
131 }
132
133 // Try a partial collection of some kind.
134 _pause_succeeded = g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
135
136 if (_pause_succeeded) {
137 if (_word_size > 0) {
138 // An allocation had been requested. Do it, eventually trying a stronger
139 // kind of GC.
140 _result = g1h->satisfy_failed_allocation(_word_size, _allocation_context, &_pause_succeeded);
141 } else if (!g1h->has_regions_left_for_allocation()) {
142 // There has been a request to perform a GC to free some space. We have no
143 // information on how much memory has been asked for. In case there are
144 // absolutely no regions left to allocate into, do a maximally compacting full GC.
145 log_info(gc, ergo)("Attempting maximally compacting collection");
146 _pause_succeeded = g1h->do_full_collection(false, /* explicit gc */
147 true /* clear_all_soft_refs */);
148 }
149 guarantee(_pause_succeeded, "Elevated collections during the safepoint must always succeed.");
150 } else {
151 assert(_result == NULL, "invariant");
152     // The only reason for the pause to not be successful is that the GC locker is
153     // active (or has become active since the prologue was executed). In this case
154     // we should retry the pause after waiting for the GC locker to become inactive.
155 _should_retry_gc = true;
156 }
157 }
158
159 void VM_G1CollectForAllocation::doit_epilogue() {
160 VM_CollectForAllocation::doit_epilogue();
161
162 // If the pause was initiated by a System.gc() and
163 // +ExplicitGCInvokesConcurrent, we have to wait here for the cycle
164 // that just started (or maybe one that was already in progress) to
165 // finish.
166 if (GCCause::is_user_requested_gc(_gc_cause) &&
|
121 // will cause the requesting thread to spin inside collect() until the
122 // just started marking cycle is complete - which may be a while. So
123 // we do NOT retry the GC.
124 if (!res) {
125 assert(_word_size == 0, "Concurrent Full GC/Humongous Object IM shouldn't be allocating");
126 if (_gc_cause != GCCause::_g1_humongous_allocation) {
127 _should_retry_gc = true;
128 }
129 return;
130 }
131 }
132
133 // Try a partial collection of some kind.
134 _pause_succeeded = g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
135
136 if (_pause_succeeded) {
137 if (_word_size > 0) {
138 // An allocation had been requested. Do it, eventually trying a stronger
139 // kind of GC.
140 _result = g1h->satisfy_failed_allocation(_word_size, _allocation_context, &_pause_succeeded);
141 } else {
142 bool should_upgrade_to_full = !g1h->should_do_concurrent_full_gc(_gc_cause) &&
143 !g1h->has_regions_left_for_allocation();
144 if (should_upgrade_to_full) {
145 // There has been a request to perform a GC to free some space. We have no
146 // information on how much memory has been asked for. In case there are
147 // absolutely no regions left to allocate into, do a maximally compacting full GC.
148 log_info(gc, ergo)("Attempting maximally compacting collection");
149 _pause_succeeded = g1h->do_full_collection(false, /* explicit gc */
150 true /* clear_all_soft_refs */);
151 }
152 }
153 guarantee(_pause_succeeded, "Elevated collections during the safepoint must always succeed.");
154 } else {
155 assert(_result == NULL, "invariant");
156     // The only reason for the pause to not be successful is that the GC locker is
157     // active (or has become active since the prologue was executed). In this case
158     // we should retry the pause after waiting for the GC locker to become inactive.
159 _should_retry_gc = true;
160 }
161 }
162
163 void VM_G1CollectForAllocation::doit_epilogue() {
164 VM_CollectForAllocation::doit_epilogue();
165
166 // If the pause was initiated by a System.gc() and
167 // +ExplicitGCInvokesConcurrent, we have to wait here for the cycle
168 // that just started (or maybe one that was already in progress) to
169 // finish.
170 if (GCCause::is_user_requested_gc(_gc_cause) &&
|