src/share/vm/memory/genCollectedHeap.cpp

Unchanged context:

  if (_old_gen->should_allocate(size, is_tlab)) {
    res = _old_gen->allocate(size, is_tlab);
  }

  return res;
}

HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool* gc_overhead_limit_was_exceeded) {
  return collector_policy()->mem_allocate_work(size,
                                               false /* is_tlab */,
                                               gc_overhead_limit_was_exceeded);
}

bool GenCollectedHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_last_ditch_collection;
}

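must_clear_all_soft_refs reports that a last-ditch collection (the VM's final attempt before giving up on an allocation) must drop every SoftReference. A standalone model of how a caller might combine this hard requirement with the policy's softer heuristic (the Policy struct and the ORing call site are assumptions, not the HotSpot code):

#include <cstdio>

// Hypothetical stand-ins for GCCause and the collector policy heuristic.
enum class Cause { last_ditch_collection, java_lang_system_gc };

struct Policy {
  bool should_clear_all_soft_refs() const { return false; }  // heuristic stub
};

bool must_clear_all_soft_refs(Cause cause) {
  return cause == Cause::last_ditch_collection;
}

int main() {
  Policy policy;
  Cause cause = Cause::last_ditch_collection;
  // The cause-based requirement is ORed into the policy's own decision.
  bool clear_all = must_clear_all_soft_refs(cause) ||
                   policy.should_clear_all_soft_refs();
  std::printf("clear_all = %d\n", clear_all);  // prints 1
  return 0;
}
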
Old:

bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  return UseConcMarkSweepGC &&
         ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
          (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
}

New:

bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  if (!UseConcMarkSweepGC) {
    return false;
  }

  switch (cause) {
    case GCCause::_gc_locker:           return GCLockerInvokesConcurrent;
    case GCCause::_java_lang_system_gc:
    case GCCause::_dcmd_gc_run:         return ExplicitGCInvokesConcurrent;
    default:                            return false;
  }
}

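The rewrite preserves the two existing cases and adds GCCause::_dcmd_gc_run, so a collection requested through the jcmd GC.run diagnostic command is now treated like an explicit System.gc(): with -XX:+UseConcMarkSweepGC and -XX:+ExplicitGCInvokesConcurrent it triggers a concurrent cycle rather than a stop-the-world full GC. A standalone model of the new decision table (the enum and boolean parameters are stand-ins for the real VM globals):

#include <cstdio>

enum class Cause { gc_locker, java_lang_system_gc, dcmd_gc_run, allocation_failure };

bool should_do_concurrent_full_gc(Cause cause,
                                  bool use_cms,
                                  bool gclocker_invokes_concurrent,
                                  bool explicit_gc_invokes_concurrent) {
  if (!use_cms) {
    return false;  // only CMS offers the concurrent path here
  }
  switch (cause) {
    case Cause::gc_locker:           return gclocker_invokes_concurrent;
    case Cause::java_lang_system_gc:
    case Cause::dcmd_gc_run:         return explicit_gc_invokes_concurrent;
    default:                         return false;
  }
}

int main() {
  // With UseConcMarkSweepGC and ExplicitGCInvokesConcurrent set, both
  // System.gc() and "jcmd <pid> GC.run" now take the concurrent path.
  std::printf("%d\n", should_do_concurrent_full_gc(
      Cause::java_lang_system_gc, true, false, true));  // 1
  std::printf("%d\n", should_do_concurrent_full_gc(
      Cause::dcmd_gc_run, true, false, true));          // 1 (the new case)
  std::printf("%d\n", should_do_concurrent_full_gc(
      Cause::allocation_failure, true, false, true));   // 0
  return 0;
}
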
Unchanged context:

void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
                                          bool is_tlab, bool run_verification, bool clear_soft_refs,
                                          bool restore_marks_for_biased_locking) {
  // Timer for individual generations. Last argument is false: no CR
  // FIXME: We should try to start the timing earlier to cover more of the GC pause
  // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
  // so we can assume here that the next GC id is what we want.
  GCTraceTime t1(gen->short_name(), PrintGCDetails, false, NULL, GCId::peek());
  TraceCollectorStats tcs(gen->counters());
  TraceMemoryManagerStats tmms(gen->kind(), gc_cause());

  size_t prev_used = gen->used();
  gen->stat_record()->invocations++;
  gen->stat_record()->accumulated_time.start();

  // Must be done anew before each collection because
  // a previous collection will do mangling and will
  // change top of some spaces.
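GCTraceTime, TraceCollectorStats, and TraceMemoryManagerStats are all scoped RAII helpers: construction starts a measurement and the destructor reports it when collect_generation returns. A minimal standalone sketch of the same pattern (ScopedPhaseTimer is illustrative, not HotSpot's GCTraceTime):

#include <chrono>
#include <cstdio>

// Scoped timer in the style of GCTraceTime: starts on construction,
// prints the elapsed time when the scope it guards ends.
class ScopedPhaseTimer {
  const char* _name;
  std::chrono::steady_clock::time_point _start;
public:
  explicit ScopedPhaseTimer(const char* name)
      : _name(name), _start(std::chrono::steady_clock::now()) {}
  ~ScopedPhaseTimer() {
    double secs = std::chrono::duration<double>(
        std::chrono::steady_clock::now() - _start).count();
    std::printf("[%s, %.4f secs]\n", _name, secs);
  }
};

void collect_one_generation() {
  ScopedPhaseTimer t("DefNew");  // mirrors gen->short_name()
  // ... collection work happens here; timing ends at the closing brace ...
}

int main() {
  collect_one_generation();
  return 0;
}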

