                                                                 uint gc_count_before,
                                                                 uint full_gc_count_before,
                                                                 GCCause::Cause gc_cause)
    : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
      _result(NULL), _size(size), _mdtype(mdtype), _loader_data(loader_data) {
  assert(_size != 0, "An allocation should always be requested with this operation.");
  // Emit the JFR AllocationRequiringGC event; GCId::peek() returns the id
  // that will be assigned to the upcoming GC.
  AllocTracer::send_allocation_requiring_gc_event(_size * HeapWordSize, GCId::peek());
}
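
// How this operation is typically driven (a sketch with assumed surrounding
// names, not code from this file): a Java thread whose metaspace allocation
// failed builds the operation and hands it to the VM thread, which runs
// doit() at a safepoint while the requester blocks:
//
//   VM_CollectForMetadataAllocation op(loader_data, word_size, mdtype,
//                                      gc_count, full_gc_count,
//                                      GCCause::_metadata_GC_threshold);
//   VMThread::execute(&op);
//   if (op.result() != NULL) {
//     // The collection freed enough metaspace for the request.
//     return op.result();
//   }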

// Returns true iff concurrent GCs unload metadata.
bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
#if INCLUDE_G1GC
  if (UseG1GC && ClassUnloadingWithConcurrentMark) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    // Ask the policy to start a concurrent mark cycle at the next
    // opportunity; with ClassUnloadingWithConcurrentMark, that cycle is
    // what unloads classes and frees metadata.
    g1h->policy()->collector_state()->set_initiate_conc_mark_if_possible(true);

    // GCCauseSetter installs _gc_cause on the heap for the duration of
    // this scope and restores the previous cause on destruction.
    GCCauseSetter x(g1h, _gc_cause);

    // At this point we are supposed to start a concurrent cycle. We
    // will do so if one is not already in progress.
    bool should_start = g1h->policy()->force_concurrent_start_if_outside_cycle(_gc_cause);

    if (should_start) {
      double pause_target = g1h->policy()->max_pause_time_ms();
      g1h->do_collection_pause_at_safepoint(pause_target);
    }
    return true;
  }
#endif

  return false;
}
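
// The boolean above feeds the retry logic in doit(): when a concurrent
// cycle has been initiated, metaspace can be expanded and the allocation
// retried instead of falling through to a stop-the-world full collection.
// Roughly (a sketch, details elided):
//
//   if (initiate_concurrent_GC()) {
//     _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
//     if (_result != NULL) {
//       return;
//     }
//   }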

void VM_CollectForMetadataAllocation::doit() {
  // SvcGCMarker reports GC begin/end to serviceability tools for this
  // full collection.
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  CollectedHeap* heap = Universe::heap();
  GCCauseSetter gccs(heap, _gc_cause);

  // Check again if the space is available. Another thread
  // may have similarly failed a metadata allocation and induced
  // a GC that freed space for the allocation.