  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_generation);
}

VM_CollectForMetadataAllocation::VM_CollectForMetadataAllocation(ClassLoaderData* loader_data,
                                                                 size_t size,
                                                                 Metaspace::MetadataType mdtype,
                                                                 uint gc_count_before,
                                                                 uint full_gc_count_before,
                                                                 GCCause::Cause gc_cause)
    : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
      _loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL) {
  assert(_size != 0, "An allocation should always be requested with this operation.");
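  // _size is in HeapWords; the tracing event takes bytes, hence the scaling.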
  AllocTracer::send_allocation_requiring_gc_event(_size * HeapWordSize, GCId::peek());
}

// Returns true iff concurrent GCs unload metadata.
bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
#if INCLUDE_ALL_GCS
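  // With CMS, metadata is unloaded during the concurrent cycle when
  // CMSClassUnloadingEnabled is set, so it is enough to request a
  // concurrent collection here.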
  if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) {
    MetaspaceGC::set_should_concurrent_collect(true);
    return true;
  }

  if (UseG1GC && ClassUnloadingWithConcurrentMark) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
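    // Ask the policy to start a concurrent marking cycle at the next
    // opportunity; with ClassUnloadingWithConcurrentMark, G1 unloads
    // classes as part of concurrent marking.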
    g1h->g1_policy()->collector_state()->set_initiate_conc_mark_if_possible(true);

    GCCauseSetter x(g1h, _gc_cause);

    // At this point we are supposed to start a concurrent cycle. We
    // will do so if one is not already in progress.
    bool should_start = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);

    if (should_start) {
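      // This runs inside a VM operation, i.e. already at a safepoint, so
      // the initial-mark pause can be triggered directly with the policy's
      // pause time target.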
      double pause_target = g1h->g1_policy()->max_pause_time_ms();
      g1h->do_collection_pause_at_safepoint(pause_target);
    }
    return true;
  }