
src/share/vm/gc/shared/genCollectedHeap.cpp

  if (_old_gen->should_allocate(size, is_tlab)) {
    res = _old_gen->allocate(size, is_tlab);
  }

  return res;
}

HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool* gc_overhead_limit_was_exceeded) {
  return collector_policy()->mem_allocate_work(size,
                                               false /* is_tlab */,
                                               gc_overhead_limit_was_exceeded);
}

bool GenCollectedHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_last_ditch_collection;
}

Old:

bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  return UseConcMarkSweepGC &&
         ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
          (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
}

New:

bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  if (!UseConcMarkSweepGC) {
    return false;
  }

  switch (cause) {
    case GCCause::_gc_locker:           return GCLockerInvokesConcurrent;
    case GCCause::_java_lang_system_gc:
    case GCCause::_dcmd_gc_run:         return ExplicitGCInvokesConcurrent;
    default:                            return false;
  }
}
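
A note for review (not part of the patch; the helper name below is hypothetical): the new switch is behaviorally equivalent to the old expression with GCCause::_dcmd_gc_run accepted alongside _java_lang_system_gc. Written as a single boolean for comparison:

// Sketch only, for review purposes: the same decision as the switch above.
static bool equivalent_decision(GCCause::Cause cause) {
  return UseConcMarkSweepGC &&
         ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
          ((cause == GCCause::_java_lang_system_gc ||
            cause == GCCause::_dcmd_gc_run) && ExplicitGCInvokesConcurrent));
}

GCCause::_dcmd_gc_run is the cause recorded when a collection is requested via the "GC.run" diagnostic command (jcmd <pid> GC.run), so with -XX:+UseConcMarkSweepGC -XX:+ExplicitGCInvokesConcurrent such a request now triggers a concurrent cycle rather than a stop-the-world full collection, matching the existing System.gc() behavior.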

void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
                                          bool is_tlab, bool run_verification, bool clear_soft_refs,
                                          bool restore_marks_for_biased_locking) {
  // Timer for individual generations. Last argument is false: no CR
  // FIXME: We should try to start the timing earlier to cover more of the GC pause
  // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
  // so we can assume here that the next GC id is what we want.
  GCTraceTime t1(gen->short_name(), PrintGCDetails, false, NULL, GCId::peek());
  TraceCollectorStats tcs(gen->counters());
  TraceMemoryManagerStats tmms(gen->kind(),gc_cause());

  size_t prev_used = gen->used();
  gen->stat_record()->invocations++;
  gen->stat_record()->accumulated_time.start();

  // Must be done anew before each collection because
  // a previous collection will do mangling and will
  // change top of some spaces.
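
On the GCId::peek() comment above, a toy sketch of the look-ahead pattern it relies on (hypothetical class, not HotSpot's GCId implementation): the id counter is only bumped later in the pause, so the trace line printed here asks for the id the upcoming GC will receive without consuming it.

// Toy stand-in, assumption only: create() hands out and consumes the next
// id, peek() reads it without a side effect.
class ToyGCId {
  static uint _next_id;
 public:
  static uint create() { return _next_id++; } // consume the next id
  static uint peek()   { return _next_id; }   // look ahead, no side effect
};
uint ToyGCId::_next_id = 0;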

