< prev index next >

src/share/vm/gc/shared/collectedHeap.cpp

Print this page
rev 13113 : imported patch 8181917-refactor-ul-logstream


 576   }
 577 }
 578 
 579 void CollectedHeap::resize_all_tlabs() {
 580   if (UseTLAB) {
 581     assert(SafepointSynchronize::is_at_safepoint() ||
 582          !is_init_completed(),
 583          "should only resize tlabs at safepoint");
 584 
 585     ThreadLocalAllocBuffer::resize_all_tlabs();
 586   }
 587 }
 588 
// Produce optional diagnostic dumps around a full collection.  Called twice
// per full GC: once with before == true (just prior) and once with
// before == false (just after).  'timer' must be non-NULL; it is passed to
// the GCTraceTime scopes so the dump phases appear in GC timing output.
void CollectedHeap::full_gc_dump(GCTimer* timer, bool before) {
  assert(timer != NULL, "timer is null");
  // Heap dump is gated on the flag matching this side of the collection:
  // HeapDumpBeforeFullGC for before == true, HeapDumpAfterFullGC otherwise.
  if ((HeapDumpBeforeFullGC && before) || (HeapDumpAfterFullGC && !before)) {
    GCTraceTime(Info, gc) tm(before ? "Heap Dump (before full gc)" : "Heap Dump (after full gc)", timer);
    HeapDumper::dump_heap();
  }

  // Class histogram is only produced when gc+classhisto logging is enabled
  // at trace level; output goes to that log's trace stream.
  Log(gc, classhisto) log;
  if (log.is_trace()) {
    GCTraceTime(Trace, gc, classhisto) tm(before ? "Class Histogram (before full gc)" : "Class Histogram (after full gc)", timer);
    // ResourceMark scopes any resource-area allocation done by the
    // inspection; it must outlive the inspector's doit() call.
    ResourceMark rm;
    VM_GC_HeapInspection inspector(log.trace_stream(), false /* ! full gc */);
    inspector.doit();
  }
}
 604 
// Hook run before a full collection; delegates to full_gc_dump with
// before == true.
void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, true);
}
 608 
// Hook run after a full collection; delegates to full_gc_dump with
// before == false.
void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, false);
}
 612 
// Record the heap's reserved address range [start, end).
// The statement order below is deliberate: the size is zeroed before the
// bounds move so a concurrent reader never transiently sees an address
// outside the final range as being "in" the heap.
void CollectedHeap::initialize_reserved_region(HeapWord *start, HeapWord *end) {
  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap.  (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start(start);
  _reserved.set_end(end);
}


 576   }
 577 }
 578 
 579 void CollectedHeap::resize_all_tlabs() {
 580   if (UseTLAB) {
 581     assert(SafepointSynchronize::is_at_safepoint() ||
 582          !is_init_completed(),
 583          "should only resize tlabs at safepoint");
 584 
 585     ThreadLocalAllocBuffer::resize_all_tlabs();
 586   }
 587 }
 588 
// Produce optional diagnostic dumps around a full collection.  Called twice
// per full GC: once with before == true (just prior) and once with
// before == false (just after).  'timer' must be non-NULL; it is passed to
// the GCTraceTime scopes so the dump phases appear in GC timing output.
void CollectedHeap::full_gc_dump(GCTimer* timer, bool before) {
  assert(timer != NULL, "timer is null");
  // Heap dump is gated on the flag matching this side of the collection:
  // HeapDumpBeforeFullGC for before == true, HeapDumpAfterFullGC otherwise.
  if ((HeapDumpBeforeFullGC && before) || (HeapDumpAfterFullGC && !before)) {
    GCTraceTime(Info, gc) tm(before ? "Heap Dump (before full gc)" : "Heap Dump (after full gc)", timer);
    HeapDumper::dump_heap();
  }

  // Class histogram is only produced when the gc+classhisto log target is
  // enabled at trace level; output is written through a LogStream bound to
  // that target.
  LogTarget(Trace, gc, classhisto) lt;
  if (lt.is_enabled()) {
    GCTraceTime(Trace, gc, classhisto) tm(before ? "Class Histogram (before full gc)" : "Class Histogram (after full gc)", timer);
    // ResourceMark scopes any resource-area allocation done by the
    // inspection; it and the LogStream must outlive the doit() call.
    ResourceMark rm;
    LogStream ls(lt);
    VM_GC_HeapInspection inspector(&ls, false /* ! full gc */);
    inspector.doit();
  }
}
 605 
// Hook run before a full collection; delegates to full_gc_dump with
// before == true.
void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, true);
}
 609 
// Hook run after a full collection; delegates to full_gc_dump with
// before == false.
void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, false);
}
 613 
// Record the heap's reserved address range [start, end).
// The statement order below is deliberate: the size is zeroed before the
// bounds move so a concurrent reader never transiently sees an address
// outside the final range as being "in" the heap.
void CollectedHeap::initialize_reserved_region(HeapWord *start, HeapWord *end) {
  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap.  (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start(start);
  _reserved.set_end(end);
}
< prev index next >