122 // will cause the requesting thread to spin inside collect() until the
123 // just started marking cycle is complete - which may be a while. So
124 // we do NOT retry the GC.
125 if (!res) {
126 assert(_word_size == 0, "Concurrent Full GC/Humongous Object IM shouldn't be allocating");
127 if (_gc_cause != GCCause::_g1_humongous_allocation) {
128 _should_retry_gc = true;
129 }
130 return;
131 }
132 }
133
134 // Try a partial collection of some kind.
135 _pause_succeeded = g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
136
137 if (_pause_succeeded) {
138 if (_word_size > 0) {
139 // An allocation had been requested. Do it, eventually trying a stronger
140 // kind of GC.
141 _result = g1h->satisfy_failed_allocation(_word_size, _allocation_context, &_pause_succeeded);
142 } else if (!g1h->has_regions_left_for_allocation()) {
143 // There has been a request to perform a GC to free some space. We have no
144 // information on how much memory has been asked for. In case there are
145 // absolutely no regions left to allocate into, do a maximally compacting full GC.
146 log_info(gc, ergo)("Attempting maximally compacting collection");
147 _pause_succeeded = g1h->do_full_collection(false, /* explicit gc */
148 true /* clear_all_soft_refs */);
149 }
150 guarantee(_pause_succeeded, "Elevated collections during the safepoint must always succeed.");
151 } else {
152 assert(_result == NULL, "invariant");
153 // The only reason for the pause to not be successful is that the GC locker is
154 // active (or has become active since the prologue was executed). In this case
155 // we should retry the pause after waiting for the GC locker to become inactive.
156 _should_retry_gc = true;
157 }
158 }
159
160 void VM_G1CollectForAllocation::doit_epilogue() {
161 VM_CollectForAllocation::doit_epilogue();
162
|
122 // will cause the requesting thread to spin inside collect() until the
123 // just started marking cycle is complete - which may be a while. So
124 // we do NOT retry the GC.
125 if (!res) {
126 assert(_word_size == 0, "Concurrent Full GC/Humongous Object IM shouldn't be allocating");
127 if (_gc_cause != GCCause::_g1_humongous_allocation) {
128 _should_retry_gc = true;
129 }
130 return;
131 }
132 }
133
134 // Try a partial collection of some kind.
135 _pause_succeeded = g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
136
137 if (_pause_succeeded) {
138 if (_word_size > 0) {
139 // An allocation had been requested. Do it, if necessary escalating to a
140 // stronger kind of GC.
141 _result = g1h->satisfy_failed_allocation(_word_size, _allocation_context, &_pause_succeeded);
142 } else if (!g1h->has_regions_left_for_allocation()) {
143 // There has been a request to perform a GC to free some space. We have no
144 // information on how much memory has been asked for. In case there are
145 // absolutely no regions left to allocate into, do a maximally compacting full GC.
146 log_info(gc, ergo)("Attempting maximally compacting collection");
147 _pause_succeeded = g1h->do_full_collection(false, /* explicit gc */
148 true /* clear_all_soft_refs */);
149 }
150 guarantee(_pause_succeeded, "Elevated collections during the safepoint must always succeed.");
151 } else {
152 assert(_result == NULL, "invariant");
153 // The only reason for the pause to not be successful is that the GC locker is
154 // active (or has become active since the prologue was executed). In this case
155 // we should retry the pause after waiting for the GC locker to become inactive.
156 _should_retry_gc = true;
157 }
158 }
159
160 void VM_G1CollectForAllocation::doit_epilogue() {
161 VM_CollectForAllocation::doit_epilogue();
162
|