  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_generation);
}

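// Collect the heap in an attempt to free space for a metadata allocation
// of the given size and type on behalf of the given class loader.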
VM_CollectForMetadataAllocation::VM_CollectForMetadataAllocation(ClassLoaderData* loader_data,
                                                                 size_t size,
                                                                 Metaspace::MetadataType mdtype,
                                                                 uint gc_count_before,
                                                                 uint full_gc_count_before,
                                                                 GCCause::Cause gc_cause)
    : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
      _loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL) {
  assert(_size != 0, "An allocation should always be requested with this operation.");
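  // Report the allocation request that is forcing this collection to the
  // event tracing framework.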
  AllocTracer::send_allocation_requiring_gc_event(_size * HeapWordSize, GCId::peek());
}

// Returns true iff concurrent GCs unload metadata.
bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
#if INCLUDE_ALL_GCS
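  // With CMS, ask the background collector to start a concurrent cycle;
  // class unloading happens as part of that cycle.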
  if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) {
    MetaspaceGC::set_should_concurrent_collect(true);
    return true;
  }

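  // With G1, request that the next evacuation pause also serve as the
  // initial mark of a concurrent cycle, which will unload classes.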
  if (UseG1GC && ClassUnloadingWithConcurrentMark) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    g1h->g1_policy()->collector_state()->set_initiate_conc_mark_if_possible(true);

    GCCauseSetter x(g1h, _gc_cause);

    // At this point we are supposed to start a concurrent cycle. We
    // will do so if one is not already in progress.
    bool should_start = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);

    if (should_start) {
      double pause_target = g1h->g1_policy()->max_pause_time_ms();
      g1h->do_collection_pause_at_safepoint(pause_target);
    }
    return true;
  }