< prev index next >

src/share/vm/gc/shared/collectedHeap.cpp

Print this page
rev 13265 : imported patch 8181917-refactor-ul-logstream


 577   }
 578 }
 579 
 580 void CollectedHeap::resize_all_tlabs() {
 581   if (UseTLAB) {
 582     assert(SafepointSynchronize::is_at_safepoint() ||
 583          !is_init_completed(),
 584          "should only resize tlabs at safepoint");
 585 
 586     ThreadLocalAllocBuffer::resize_all_tlabs();
 587   }
 588 }
 589 
 590 void CollectedHeap::full_gc_dump(GCTimer* timer, bool before) {
 591   assert(timer != NULL, "timer is null");
 592   if ((HeapDumpBeforeFullGC && before) || (HeapDumpAfterFullGC && !before)) {
 593     GCTraceTime(Info, gc) tm(before ? "Heap Dump (before full gc)" : "Heap Dump (after full gc)", timer);
 594     HeapDumper::dump_heap();
 595   }
 596 
 597   Log(gc, classhisto) log;
 598   if (log.is_trace()) {
 599     GCTraceTime(Trace, gc, classhisto) tm(before ? "Class Histogram (before full gc)" : "Class Histogram (after full gc)", timer);
 600     ResourceMark rm;
 601     VM_GC_HeapInspection inspector(log.trace_stream(), false /* ! full gc */);

 602     inspector.doit();
 603   }
 604 }
 605 
// Hook run before a full collection: delegates to full_gc_dump() with
// before=true (heap dump / class histogram of the pre-GC heap).
void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, true);
}
 609 
// Hook run after a full collection: delegates to full_gc_dump() with
// before=false (heap dump / class histogram of the post-GC heap).
void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, false);
}
 613 
// Record the reserved address range [start, end) of the heap in _reserved.
// It is important to do this in a way such that concurrent readers can't
// temporarily think something is in the heap.  (Seen this happen in asserts.)
// Zeroing the size first keeps the region logically empty while the bounds
// are being updated, so the statement order below must not change.
void CollectedHeap::initialize_reserved_region(HeapWord *start, HeapWord *end) {
  _reserved.set_word_size(0);
  _reserved.set_start(start);
  _reserved.set_end(end);
}


 577   }
 578 }
 579 
 580 void CollectedHeap::resize_all_tlabs() {
 581   if (UseTLAB) {
 582     assert(SafepointSynchronize::is_at_safepoint() ||
 583          !is_init_completed(),
 584          "should only resize tlabs at safepoint");
 585 
 586     ThreadLocalAllocBuffer::resize_all_tlabs();
 587   }
 588 }
 589 
// Emit the optional diagnostics that bracket a full GC:
//  - a heap dump, when HeapDumpBeforeFullGC / HeapDumpAfterFullGC is set;
//  - a class histogram, when gc+classhisto=trace logging is enabled.
//   timer  - phase timer the GCTraceTime scopes report into; must be non-NULL.
//   before - true for the pre-GC invocation, false for the post-GC one.
void CollectedHeap::full_gc_dump(GCTimer* timer, bool before) {
  assert(timer != NULL, "timer is null");
  if ((HeapDumpBeforeFullGC && before) || (HeapDumpAfterFullGC && !before)) {
    GCTraceTime(Info, gc) tm(before ? "Heap Dump (before full gc)" : "Heap Dump (after full gc)", timer);
    HeapDumper::dump_heap();
  }

  // Check the LogTarget first so the histogram (and its timing scope) is
  // only produced when gc+classhisto=trace is actually enabled.
  LogTarget(Trace, gc, classhisto) lt;
  if (lt.is_enabled()) {
    GCTraceTime(Trace, gc, classhisto) tm(before ? "Class Histogram (before full gc)" : "Class Histogram (after full gc)", timer);
    ResourceMark rm;
    // Stack-allocated stream over the log target; lifetime is this block.
    LogStream ls(lt);
    VM_GC_HeapInspection inspector(&ls, false /* ! full gc */);
    inspector.doit();
  }
}
 606 
// Hook run before a full collection: delegates to full_gc_dump() with
// before=true (heap dump / class histogram of the pre-GC heap).
void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, true);
}
 610 
// Hook run after a full collection: delegates to full_gc_dump() with
// before=false (heap dump / class histogram of the post-GC heap).
void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, false);
}
 614 
// Record the reserved address range [start, end) of the heap in _reserved.
// It is important to do this in a way such that concurrent readers can't
// temporarily think something is in the heap.  (Seen this happen in asserts.)
// Zeroing the size first keeps the region logically empty while the bounds
// are being updated, so the statement order below must not change.
void CollectedHeap::initialize_reserved_region(HeapWord *start, HeapWord *end) {
  _reserved.set_word_size(0);
  _reserved.set_start(start);
  _reserved.set_end(end);
}
< prev index next >