src/hotspot/share/services/heapDumper.cpp

rev 58388 : 8237354: Add option to jcmd to write a gzipped heap dump
Reviewed-by:
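
Example usage once this change is in (a sketch; the -gz option name follows
this RFE, the exact jcmd syntax may differ in the final version):

    jcmd <pid> GC.heap_dump -gz=1 /tmp/heap.hprof.gz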


  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "jvm.h"
  27 #include "classfile/classLoaderData.inline.hpp"
  28 #include "classfile/classLoaderDataGraph.hpp"
  29 #include "classfile/javaClasses.inline.hpp"
  30 #include "classfile/symbolTable.hpp"
  31 #include "classfile/systemDictionary.hpp"
  32 #include "classfile/vmSymbols.hpp"
  33 #include "gc/shared/gcLocker.hpp"
  34 #include "gc/shared/gcVMOperations.hpp"

  35 #include "jfr/jfrEvents.hpp"
  36 #include "memory/allocation.inline.hpp"
  37 #include "memory/resourceArea.hpp"
  38 #include "memory/universe.hpp"
  39 #include "oops/objArrayKlass.hpp"
  40 #include "oops/objArrayOop.inline.hpp"
  41 #include "oops/oop.inline.hpp"
  42 #include "oops/typeArrayOop.inline.hpp"
  43 #include "runtime/frame.inline.hpp"
  44 #include "runtime/handles.inline.hpp"
  45 #include "runtime/javaCalls.hpp"
  46 #include "runtime/jniHandles.hpp"
  47 #include "runtime/os.inline.hpp"
  48 #include "runtime/reflectionUtils.hpp"
  49 #include "runtime/thread.inline.hpp"
  50 #include "runtime/threadSMR.hpp"
  51 #include "runtime/vframe.hpp"
  52 #include "runtime/vmThread.hpp"
  53 #include "runtime/vmOperations.hpp"
  54 #include "services/heapDumper.hpp"


 360   HPROF_GC_ROOT_JNI_GLOBAL      = 0x01,
 361   HPROF_GC_ROOT_JNI_LOCAL       = 0x02,
 362   HPROF_GC_ROOT_JAVA_FRAME      = 0x03,
 363   HPROF_GC_ROOT_NATIVE_STACK    = 0x04,
 364   HPROF_GC_ROOT_STICKY_CLASS    = 0x05,
 365   HPROF_GC_ROOT_THREAD_BLOCK    = 0x06,
 366   HPROF_GC_ROOT_MONITOR_USED    = 0x07,
 367   HPROF_GC_ROOT_THREAD_OBJ      = 0x08,
 368   HPROF_GC_CLASS_DUMP           = 0x20,
 369   HPROF_GC_INSTANCE_DUMP        = 0x21,
 370   HPROF_GC_OBJ_ARRAY_DUMP       = 0x22,
 371   HPROF_GC_PRIM_ARRAY_DUMP      = 0x23
 372 } hprofTag;
 373 
 374 // Default stack trace ID (used for dummy HPROF_TRACE record)
 375 enum {
 376   STACK_TRACE_ID = 1,
 377   INITIAL_CLASS_COUNT = 200
 378 };
 379 
 380 // Supports I/O operations on a dump file
 381 
 382 class DumpWriter : public StackObj {
 383  private:
 384   enum {
 385     io_buffer_max_size = 8*M,
 386     io_buffer_min_size = 64*K,
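        // 1-byte tag + 4-byte timestamp + 4-byte length (see start_sub_record()).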
 387     dump_segment_header_size = 9
 388   };
 389 
 390   int _fd;              // file descriptor (-1 if dump file not open)
 391   julong _bytes_written; // number of bytes written to dump file
 392 
 393   char* _buffer;    // internal buffer
 394   size_t _size;
 395   size_t _pos;
 396 
 397   bool _in_dump_segment; // Are we currently in a dump segment?
 398   bool _is_huge_sub_record; // Are we writing a sub-record larger than the buffer size?
 399   DEBUG_ONLY(size_t _sub_record_left;) // The bytes not written for the current sub-record.
 400   DEBUG_ONLY(bool _sub_record_ended;) // True if end_sub_record() has been called.
 401 
 402   char* _error;   // error message when I/O fails
 403 
 404   void set_file_descriptor(int fd)              { _fd = fd; }
 405   int file_descriptor() const                   { return _fd; }
 406 
 407   bool is_open() const                          { return file_descriptor() >= 0; }
 408   void flush();
 409 
 410   char* buffer() const                          { return _buffer; }
 411   size_t buffer_size() const                    { return _size; }
 412   size_t position() const                       { return _pos; }
 413   void set_position(size_t pos)                 { _pos = pos; }
 414 
 415   void set_error(const char* error)             { _error = (char*)os::strdup(error); }

 416 
 417   // all I/O goes through this function
 418   void write_internal(void* s, size_t len);
 419 
 420  public:
 421   DumpWriter(const char* path);
 422   ~DumpWriter();
 423 
 424   void close();
 425 
 426   // total number of bytes written to the disk
 427   julong bytes_written() const          { return _bytes_written; }
 428 
 429   char* error() const                   { return _error; }
 430 
 431   // writer functions
 432   void write_raw(void* s, size_t len);
 433   void write_u1(u1 x)                   { write_raw((void*)&x, 1); }
 434   void write_u2(u2 x);
 435   void write_u4(u4 x);
 436   void write_u8(u8 x);
 437   void write_objectID(oop o);
 438   void write_symbolID(Symbol* o);
 439   void write_classID(Klass* k);
 440   void write_id(u4 x);
 441 
 442   // Start a new sub-record. Starts a new heap dump segment if needed.
 443   void start_sub_record(u1 tag, u4 len);
 444   // Ends the current sub-record.
 445   void end_sub_record();
 446   // Finishes the current dump segment if not already finished.
 447   void finish_dump_segment();
 448 };
 449 
 450 DumpWriter::DumpWriter(const char* path) : _fd(-1), _bytes_written(0), _pos(0),
 451                                            _in_dump_segment(false), _error(NULL) {
 452   // try to allocate an I/O buffer of io_buffer_max_size. If there isn't
 453   // sufficient memory then reduce the size until we can allocate something.
 454   _size = io_buffer_max_size;
 455   do {
 456     _buffer = (char*)os::malloc(_size, mtInternal);
 457     if (_buffer == NULL) {
 458       _size = _size >> 1;
 459     }
 460   } while (_buffer == NULL && _size >= io_buffer_min_size);
 461 
 462   if (_buffer == NULL) {
 463     set_error("Could not allocate buffer memory for heap dump");
 464   } else {
 465     _fd = os::create_binary_file(path, false);    // don't replace existing file
 466 
 467     // if the open failed we record the error
 468     if (_fd < 0) {
 469       set_error(os::strerror(errno));
 470     }
 471   }
 472 }
 473 
 474 DumpWriter::~DumpWriter() {
 475   close();
 476   os::free(_buffer);
 477   os::free(_error);
 478 }
 479 
 480 // closes dump file (if open)
 481 void DumpWriter::close() {
 482   // flush and close dump file
 483   if (is_open()) {
 484     flush();
 485     os::close(file_descriptor());
 486     set_file_descriptor(-1);
 487   }
 488 }
 489 
 490 // write directly to the file
 491 void DumpWriter::write_internal(void* s, size_t len) {
 492   if (is_open()) {
 493     const char* pos = (char*)s;
 494     ssize_t n = 0;
 495     while (len > 0) {
 496       uint tmp = (uint)MIN2(len, (size_t)INT_MAX);
 497       n = os::write(file_descriptor(), pos, tmp);
 498 
 499       if (n < 0) {
 500         // EINTR cannot happen here, os::write will take care of that
 501         set_error(os::strerror(errno));
 502         os::close(file_descriptor());
 503         set_file_descriptor(-1);
 504         return;
 505       }
 506 
 507       _bytes_written += n;
 508       pos += n;
 509       len -= n;
 510     }
 511   }

 512 }
 513 
 514 // write raw bytes
 515 void DumpWriter::write_raw(void* s, size_t len) {
 516   assert(!_in_dump_segment || (_sub_record_left >= len), "sub-record too large");
 517   debug_only(_sub_record_left -= len);
 518 
 519   // flush buffer to make room
 520   if (len > buffer_size() - position()) {
 521     assert(!_in_dump_segment || _is_huge_sub_record, "Cannot overflow in non-huge sub-record.");
 522     flush();
 523 
 524     // If larger than the buffer, just write it directly.
 525     if (len > buffer_size()) {
 526       write_internal(s, len);
 527 
 528       return;
 529     }
 530   }
 531 
 532   memcpy(buffer() + position(), s, len);
 533   set_position(position() + len);
 534 }
 535 
 536 // flush any buffered bytes to the file
 537 void DumpWriter::flush() {
 538   write_internal(buffer(), position());
 539   set_position(0);
 540 }
 541 
 542 void DumpWriter::write_u2(u2 x) {
 543   u2 v;
 544   Bytes::put_Java_u2((address)&v, x);
 545   write_raw((void*)&v, 2);
 546 }
 547 
 548 void DumpWriter::write_u4(u4 x) {
 549   u4 v;
 550   Bytes::put_Java_u4((address)&v, x);
 551   write_raw((void*)&v, 4);
 552 }
 553 
 554 void DumpWriter::write_u8(u8 x) {
 555   u8 v;
 556   Bytes::put_Java_u8((address)&v, x);
 557   write_raw((void*)&v, 8);
 558 }
 559 
 560 void DumpWriter::write_objectID(oop o) {
 561   address a = cast_from_oop<address>(o);
 562 #ifdef _LP64
 563   write_u8((u8)a);
 564 #else
 565   write_u4((u4)a);
 566 #endif
 567 }
 568 
 569 void DumpWriter::write_symbolID(Symbol* s) {
 570   address a = (address)((uintptr_t)s);
 571 #ifdef _LP64
 572   write_u8((u8)a);
 573 #else
 574   write_u4((u4)a);
 575 #endif
 576 }
 577 


 592   if (_in_dump_segment) {
 593     assert(_sub_record_left == 0, "Last sub-record not written completely");
 594     assert(_sub_record_ended, "sub-record must have ended");
 595 
 596     // Fix up the dump segment length if we haven't written a huge sub-record last
 597     // (in which case the segment length was already set to the correct value initially).
 598     if (!_is_huge_sub_record) {
 599       assert(position() > dump_segment_header_size, "Dump segment should have some content");
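      // The 4-byte segment length field sits at offset 5, right after the
      // 1-byte tag and the 4-byte timestamp written in start_sub_record().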
 600       Bytes::put_Java_u4((address) (buffer() + 5), (u4) (position() - dump_segment_header_size));
 601     }
 602 
 603     flush();
 604     _in_dump_segment = false;
 605   }
 606 }
 607 
 608 void DumpWriter::start_sub_record(u1 tag, u4 len) {
 609   if (!_in_dump_segment) {
 610     if (position() > 0) {
 611       flush();
 612       assert(position() == 0, "Must be at the start");
 613     }
 614 
 615     write_u1(HPROF_HEAP_DUMP_SEGMENT);
 616     write_u4(0); // timestamp
 617     // Will be fixed up later if we add more sub-records.  If this is a huge sub-record,
 618     // this is already the correct length, since we don't add more sub-records.
 619     write_u4(len);
 620     _in_dump_segment = true;
 621     _is_huge_sub_record = len > buffer_size() - dump_segment_header_size;
 622   } else if (_is_huge_sub_record || (len > buffer_size() - position())) {
 623     // This object will not completely fit or the last sub-record was huge.
 624     // Finish the current segment and try again.
 625     finish_dump_segment();
 626     start_sub_record(tag, len);
 627 
 628     return;
 629   }
 630 
 631   debug_only(_sub_record_left = len);
 632   debug_only(_sub_record_ended = false);
 633 
 634   write_u1(tag);


1486   }
1487 
1488   if (DumperSupport::mask_dormant_archived_object(o) == NULL) {
1489     log_debug(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s)", p2i(o), o->klass()->external_name());
1490     return;
1491   }
1492 
1493   if (o->is_instance()) {
1494     // create a HPROF_GC_INSTANCE record for each object
1495     DumperSupport::dump_instance(writer(), o);
1496   } else if (o->is_objArray()) {
1497     // create a HPROF_GC_OBJ_ARRAY_DUMP record for each object array
1498     DumperSupport::dump_object_array(writer(), objArrayOop(o));
1499   } else if (o->is_typeArray()) {
1500     // create a HPROF_GC_PRIM_ARRAY_DUMP record for each type array
1501     DumperSupport::dump_prim_array(writer(), typeArrayOop(o));
1502   }
1503 }
1504 
1505 // The VM operation that performs the heap dump
1506 class VM_HeapDumper : public VM_GC_Operation {
1507  private:
1508   static VM_HeapDumper* _global_dumper;
1509   static DumpWriter*    _global_writer;
1510   DumpWriter*           _local_writer;
1511   JavaThread*           _oome_thread;
1512   Method*               _oome_constructor;
1513   bool _gc_before_heap_dump;
1514   GrowableArray<Klass*>* _klass_map;
1515   ThreadStackTrace** _stack_traces;
1516   int _num_threads;
1517 
1518   // accessors and setters
1519   static VM_HeapDumper* dumper()         {  assert(_global_dumper != NULL, "Error"); return _global_dumper; }
1520   static DumpWriter* writer()            {  assert(_global_writer != NULL, "Error"); return _global_writer; }
1521   void set_global_dumper() {
1522     assert(_global_dumper == NULL, "Error");
1523     _global_dumper = this;
1524   }
1525   void set_global_writer() {
1526     assert(_global_writer == NULL, "Error");


1542   // writes a HPROF_GC_CLASS_DUMP record for a given basic type
1543   // array (and each multi-dimensional array too)
1544   static void do_basic_type_array_class_dump(Klass* k);
1545 
1546   // HPROF_GC_ROOT_THREAD_OBJ records
1547   int do_thread(JavaThread* thread, u4 thread_serial_num);
1548   void do_threads();
1549 
1550   void add_class_serial_number(Klass* k, int serial_num) {
1551     _klass_map->at_put_grow(serial_num, k);
1552   }
1553 
1554   // HPROF_TRACE and HPROF_FRAME records
1555   void dump_stack_traces();
1556 
1557  public:
1558   VM_HeapDumper(DumpWriter* writer, bool gc_before_heap_dump, bool oome) :
1559     VM_GC_Operation(0 /* total collections,      dummy, ignored */,
1560                     GCCause::_heap_dump /* GC Cause */,
1561                     0 /* total full collections, dummy, ignored */,
1562                     gc_before_heap_dump) {

1563     _local_writer = writer;
1564     _gc_before_heap_dump = gc_before_heap_dump;
1565     _klass_map = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Klass*>(INITIAL_CLASS_COUNT, true);
1566     _stack_traces = NULL;
1567     _num_threads = 0;
1568     if (oome) {
1569       assert(!Thread::current()->is_VM_thread(), "Dump from OutOfMemoryError cannot be called by the VMThread");
1570       // get OutOfMemoryError zero-parameter constructor
1571       InstanceKlass* oome_ik = SystemDictionary::OutOfMemoryError_klass();
1572       _oome_constructor = oome_ik->find_method(vmSymbols::object_initializer_name(),
1573                                                           vmSymbols::void_method_signature());
1574       // get thread throwing OOME when generating the heap dump at OOME
1575       _oome_thread = JavaThread::current();
1576     } else {
1577       _oome_thread = NULL;
1578       _oome_constructor = NULL;
1579     }
1580   }
1581   ~VM_HeapDumper() {
1582     if (_stack_traces != NULL) {
1583       for (int i=0; i < _num_threads; i++) {
1584         delete _stack_traces[i];
1585       }
1586       FREE_C_HEAP_ARRAY(ThreadStackTrace*, _stack_traces);
1587     }
1588     delete _klass_map;
1589   }
1590 
1591   VMOp_Type type() const { return VMOp_HeapDumper; }
1592   void doit();

1593 };
1594 

1595 VM_HeapDumper* VM_HeapDumper::_global_dumper = NULL;
1596 DumpWriter*    VM_HeapDumper::_global_writer = NULL;
1597 
1598 bool VM_HeapDumper::skip_operation() const {
1599   return false;
1600 }
1601 
1602 // fixes up the current dump record and writes HPROF_HEAP_DUMP_END record
1603 void DumperSupport::end_of_dump(DumpWriter* writer) {
1604   writer->finish_dump_segment();
1605 
1606   writer->write_u1(HPROF_HEAP_DUMP_END);
1607   writer->write_u4(0);
1608   writer->write_u4(0);
1609 }
1610 
1611 // writes a HPROF_LOAD_CLASS record for the class (and each of its
1612 // array classes)
1613 void VM_HeapDumper::do_load_class(Klass* k) {
1614   static u4 class_serial_num = 0;


1803 
1804   HandleMark hm;
1805   CollectedHeap* ch = Universe::heap();
1806 
1807   ch->ensure_parsability(false); // must happen, even if collection does
1808                                  // not happen (e.g. due to GCLocker)
1809 
1810   if (_gc_before_heap_dump) {
1811     if (GCLocker::is_active()) {
1812       warning("GC locker is held; pre-heapdump GC was skipped");
1813     } else {
1814       ch->collect_as_vm_thread(GCCause::_heap_dump);
1815     }
1816   }
1817 
1818   // At this point we should be the only dumper active, so
1819   // the following should be safe.
1820   set_global_dumper();
1821   set_global_writer();
1822 
1823   // Write the file header - we always use 1.0.2
1824   size_t used = ch->used();
1825   const char* header = "JAVA PROFILE 1.0.2";
1826 
1827   // header is a few bytes long - no chance to overflow int
1828   writer()->write_raw((void*)header, (int)strlen(header));
1829   writer()->write_u1(0); // terminator
1830   writer()->write_u4(oopSize);
1831   // timestamp is current time in ms
1832   writer()->write_u8(os::javaTimeMillis());
1833 
1834   // HPROF_UTF8 records
1835   SymbolTableDumper sym_dumper(writer());
1836   SymbolTable::symbols_do(&sym_dumper);
1837 
1838   // write HPROF_LOAD_CLASS records
1839   {
1840     LockedClassesDo locked_load_classes(&do_load_class);
1841     ClassLoaderDataGraph::classes_do(&locked_load_classes);
1842   }
1843   Universe::basic_type_classes_do(&do_load_class);
1844 


1867 
1868   // HPROF_GC_ROOT_MONITOR_USED
1869   MonitorUsedDumper mon_dumper(writer());
1870   ObjectSynchronizer::oops_do(&mon_dumper);
1871 
1872   // HPROF_GC_ROOT_JNI_GLOBAL
1873   JNIGlobalsDumper jni_dumper(writer());
1874   JNIHandles::oops_do(&jni_dumper);
1875   Universe::oops_do(&jni_dumper);  // technically not jni roots, but global roots
1876                                    // for things like preallocated throwable backtraces
1877 
1878   // HPROF_GC_ROOT_STICKY_CLASS
1879   // These should be classes in the NULL class loader data, and not all classes
1880   // if !ClassUnloading
1881   StickyClassDumper class_dumper(writer());
1882   ClassLoaderData::the_null_class_loader_data()->classes_do(&class_dumper);
1883 
1884   // Writes the HPROF_HEAP_DUMP_END record.
1885   DumperSupport::end_of_dump(writer());
1886 
1887   // Now we clear the global variables, so that a future dumper might run.
1888   clear_global_dumper();
1889   clear_global_writer();
1890 }
1891 
1892 void VM_HeapDumper::dump_stack_traces() {
1893   // write a HPROF_TRACE record without any frames to be referenced as object alloc sites
1894   DumperSupport::write_header(writer(), HPROF_TRACE, 3*sizeof(u4));
1895   writer()->write_u4((u4) STACK_TRACE_ID);
1896   writer()->write_u4(0);                    // thread number
1897   writer()->write_u4(0);                    // frame count
1898 
1899   _stack_traces = NEW_C_HEAP_ARRAY(ThreadStackTrace*, Threads::number_of_threads(), mtInternal);
1900   int frame_serial_num = 0;
1901   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
1902     oop threadObj = thread->threadObj();
1903     if (threadObj != NULL && !thread->is_exiting() && !thread->is_hidden_from_external_view()) {
1904       // dump thread stack trace

1905       ThreadStackTrace* stack_trace = new ThreadStackTrace(thread, false);
1906       stack_trace->dump_stack_at_safepoint(-1);
1907       _stack_traces[_num_threads++] = stack_trace;
1908 
1909       // write HPROF_FRAME records for this thread's stack trace
1910       int depth = stack_trace->get_stack_depth();
1911       int thread_frame_start = frame_serial_num;
1912       int extra_frames = 0;
1913       // write a fake frame that makes it look like the thread that caused the OOME
1914       // is in the OutOfMemoryError zero-parameter constructor
1915       if (thread == _oome_thread && _oome_constructor != NULL) {
1916         int oome_serial_num = _klass_map->find(_oome_constructor->method_holder());
1917         // the class serial number starts from 1
1918         assert(oome_serial_num > 0, "OutOfMemoryError class not found");
1919         DumperSupport::dump_stack_frame(writer(), ++frame_serial_num, oome_serial_num,
1920                                         _oome_constructor, 0);
1921         extra_frames++;
1922       }
1923       for (int j=0; j < depth; j++) {
1924         StackFrameInfo* frame = stack_trace->stack_frame_at(j);


1927         // the class serial number starts from 1
1928         assert(class_serial_num > 0, "class not found");
1929         DumperSupport::dump_stack_frame(writer(), ++frame_serial_num, class_serial_num, m, frame->bci());
1930       }
1931       depth += extra_frames;
1932 
1933       // write HPROF_TRACE record for one thread
1934       DumperSupport::write_header(writer(), HPROF_TRACE, 3*sizeof(u4) + depth*oopSize);
1935       int stack_serial_num = _num_threads + STACK_TRACE_ID;
1936       writer()->write_u4(stack_serial_num);      // stack trace serial number
1937       writer()->write_u4((u4) _num_threads);     // thread serial number
1938       writer()->write_u4(depth);                 // frame count
1939       for (int j=1; j <= depth; j++) {
1940         writer()->write_id(thread_frame_start + j);
1941       }
1942     }
1943   }
1944 }
1945 
1946 // dump the heap to the given path.
1947 int HeapDumper::dump(const char* path, outputStream* out) {
1948   assert(path != NULL && strlen(path) > 0, "path missing");
1949 
1950   // print message in interactive case
1951   if (out != NULL) {
1952     out->print_cr("Dumping heap to %s ...", path);
1953     timer()->start();
1954   }
1955 
1956   // create JFR event
1957   EventHeapDump event;
1958 
1959   // create the dump writer. If the file cannot be opened then bail
1960   DumpWriter writer(path);
1961   if (writer.error() != NULL) {
1962     set_error(writer.error());
1963     if (out != NULL) {
1964       out->print_cr("Unable to create %s: %s", path,
1965         (error() != NULL) ? error() : "reason unknown");
1966     }
1967     return -1;
1968   }
1969 
1970   // generate the dump
1971   VM_HeapDumper dumper(&writer, _gc_before_heap_dump, _oome);
1972   if (Thread::current()->is_VM_thread()) {
1973     assert(SafepointSynchronize::is_at_safepoint(), "Expected to be called at a safepoint");
1974     dumper.doit();
1975   } else {
1976     VMThread::execute(&dumper);
1977   }
1978 
1979   // close dump file and record any error that the writer may have encountered
1980   writer.close();
1981   set_error(writer.error());
1982 
1983   // emit JFR event
1984   if (error() == NULL) {
1985     event.set_destination(path);
1986     event.set_gcBeforeDump(_gc_before_heap_dump);
1987     event.set_size(writer.bytes_written());
1988     event.set_onOutOfMemoryError(_oome);
1989     event.commit();
1990   }
1991 
1992   // print message in interactive case
1993   if (out != NULL) {
1994     timer()->stop();
1995     if (error() == NULL) {
1996       out->print_cr("Heap dump file created [" JULONG_FORMAT " bytes in %3.3f secs]",
1997                     writer.bytes_written(), timer()->seconds());
1998     } else {
1999       out->print_cr("Dump file is incomplete: %s", writer.error());
2000     }


2007 HeapDumper::~HeapDumper() {
2008   if (timer()->is_active()) {
2009     timer()->stop();
2010   }
2011   set_error(NULL);
2012 }
2013 
2014 
2015 // returns the error string (resource allocated), or NULL
2016 char* HeapDumper::error_as_C_string() const {
2017   if (error() != NULL) {
2018     char* str = NEW_RESOURCE_ARRAY(char, strlen(error())+1);
2019     strcpy(str, error());
2020     return str;
2021   } else {
2022     return NULL;
2023   }
2024 }
2025 
2026 // set the error string
2027 void HeapDumper::set_error(char* error) {
2028   if (_error != NULL) {
2029     os::free(_error);
2030   }
2031   if (error == NULL) {
2032     _error = NULL;
2033   } else {
2034     _error = os::strdup(error);
2035     assert(_error != NULL, "allocation failure");
2036   }
2037 }
2038 
2039 // Called by out-of-memory error reporting by a single Java thread
2040 // outside of a JVM safepoint
2041 void HeapDumper::dump_heap_from_oome() {
2042   HeapDumper::dump_heap(true);
2043 }
2044 
2045 // Called by error reporting by a single Java thread outside of a JVM safepoint,
2046 // or by heap dumping by the VM thread during a (GC) safepoint. Thus, these various
2047 // callers are strictly serialized and guaranteed not to interfere below. For more




  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "jvm.h"
  27 #include "classfile/classLoaderData.inline.hpp"
  28 #include "classfile/classLoaderDataGraph.hpp"
  29 #include "classfile/javaClasses.inline.hpp"
  30 #include "classfile/symbolTable.hpp"
  31 #include "classfile/systemDictionary.hpp"
  32 #include "classfile/vmSymbols.hpp"
  33 #include "gc/shared/gcLocker.hpp"
  34 #include "gc/shared/gcVMOperations.hpp"
  35 #include "gc/shared/workgroup.hpp"
  36 #include "jfr/jfrEvents.hpp"
  37 #include "memory/allocation.inline.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "memory/universe.hpp"
  40 #include "oops/objArrayKlass.hpp"
  41 #include "oops/objArrayOop.inline.hpp"
  42 #include "oops/oop.inline.hpp"
  43 #include "oops/typeArrayOop.inline.hpp"
  44 #include "runtime/frame.inline.hpp"
  45 #include "runtime/handles.inline.hpp"
  46 #include "runtime/javaCalls.hpp"
  47 #include "runtime/jniHandles.hpp"
  48 #include "runtime/os.inline.hpp"
  49 #include "runtime/reflectionUtils.hpp"
  50 #include "runtime/thread.inline.hpp"
  51 #include "runtime/threadSMR.hpp"
  52 #include "runtime/vframe.hpp"
  53 #include "runtime/vmThread.hpp"
  54 #include "runtime/vmOperations.hpp"
  55 #include "services/heapDumper.hpp"


 361   HPROF_GC_ROOT_JNI_GLOBAL      = 0x01,
 362   HPROF_GC_ROOT_JNI_LOCAL       = 0x02,
 363   HPROF_GC_ROOT_JAVA_FRAME      = 0x03,
 364   HPROF_GC_ROOT_NATIVE_STACK    = 0x04,
 365   HPROF_GC_ROOT_STICKY_CLASS    = 0x05,
 366   HPROF_GC_ROOT_THREAD_BLOCK    = 0x06,
 367   HPROF_GC_ROOT_MONITOR_USED    = 0x07,
 368   HPROF_GC_ROOT_THREAD_OBJ      = 0x08,
 369   HPROF_GC_CLASS_DUMP           = 0x20,
 370   HPROF_GC_INSTANCE_DUMP        = 0x21,
 371   HPROF_GC_OBJ_ARRAY_DUMP       = 0x22,
 372   HPROF_GC_PRIM_ARRAY_DUMP      = 0x23
 373 } hprofTag;
 374 
 375 // Default stack trace ID (used for dummy HPROF_TRACE record)
 376 enum {
 377   STACK_TRACE_ID = 1,
 378   INITIAL_CLASS_COUNT = 200
 379 };
 380 
 381 class GzipBackend;
 382 class WriteWorkList;
 383 
 384 // Interface for a compression implementation.
 385 class AbstractCompressor : public CHeapObj<mtInternal> {
 386 public:
 387   virtual ~AbstractCompressor() { }
 388 
 389   // Initializes the compressor. Returns a static error message in case of an error.
 390   // Otherwise it initializes the needed out and tmp sizes for the given block size.
 391   virtual char const* init(size_t block_size, size_t* needed_out_size, size_t* needed_tmp_size) = 0;
 392 
 393   // Does the actual compression. Returns NULL on success and a static error message otherwise.
 394   // Sets the 'compressed_size'.
 395   virtual char const* compress(char* in, size_t in_size, char* out, size_t out_size, char* tmp, size_t tmp_size,
 396                                size_t* compressed_size) = 0;
 397 };
 398 
 399 // Interface for a writer implementation.
 400 class AbstractWriter : public CHeapObj<mtInternal> {
 401 public:
 402   virtual ~AbstractWriter() { }
 403 
 404   // Opens the writer. Returns NULL on success and a static error message otherwise.
 405   virtual char const* open_writer() = 0;
 406 
 407   // Does the write. Returns NULL on success and a static error message otherwise.
 408   virtual char const* write_buf(char* buf, ssize_t size) = 0;
 409 };
 410 
 411 
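// Function pointer types for the ZIP_GZip_InitParams and ZIP_GZip_Fully entry
// points looked up dynamically from the zip library below (the signatures here
// mirror how they are called in this file).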
 412 typedef char const* (*GzipInitFunc)(size_t, size_t*, size_t*, int);
 413 typedef size_t(*GzipFunc)(char*, size_t, char*, size_t, char*, size_t, int, char*, char const**);
 414 
 415 class GZipCompressor : public AbstractCompressor {
 416 private:
 417   int _level;
 418   size_t _block_size;
 419   bool _is_first;
 420 
 421   GzipInitFunc gzip_init_func;
 422   GzipFunc gzip_func;
 423 
 424   void* load_gzip_func(char const* name);
 425 
 426 public:
 427   GZipCompressor(int level) : _level(level), _block_size(0), _is_first(false) {
 428   }
 429 
 430   virtual char const* init(size_t block_size, size_t* needed_out_size, size_t* needed_tmp_size);
 431 
 432   virtual char const* compress(char* in, size_t in_size, char* out, size_t out_size, char* tmp, size_t tmp_size,
 433                                size_t* compressed_size);
 434 };
 435 
 436 void* GZipCompressor::load_gzip_func(char const* name) {
 437   char path[JVM_MAXPATHLEN];
 438   char ebuf[1024];
 439   void* handle;
 440 
 441   if (os::dll_locate_lib(path, sizeof(path), Arguments::get_dll_dir(), "zip")) {
 442     handle = os::dll_load(path, ebuf, sizeof ebuf);
 443 
 444     if (handle != NULL) {
 445       return os::dll_lookup(handle, name);
 446     }
 447   }
 448 
 449   return NULL;
 450 }
 451 
 452 char const* GZipCompressor::init(size_t block_size, size_t* needed_out_size, size_t* needed_tmp_size) {
 453   _block_size = block_size;
 454   _is_first = true;
 455 
 456   gzip_func = (GzipFunc) load_gzip_func("ZIP_GZip_Fully");
 457 
 458   if (gzip_func == NULL) {
 459     return "Cannot get ZIP_GZip_Fully function";
 460   } else {
 461     gzip_init_func = (GzipInitFunc) load_gzip_func("ZIP_GZip_InitParams");
 462 
 463     if (gzip_init_func == NULL) {
 464       return "Cannot get ZIP_GZip_InitParams function";
 465     } else {
 466       return gzip_init_func(block_size, needed_out_size, needed_tmp_size, _level);
 467     }
 468   }
 469 }
 470 
 471 char const* GZipComressor::compress(char* in, size_t in_size, char* out, size_t out_size, char* tmp,
 472                                     size_t tmp_size, size_t* compressed_size) {
 473   char const* msg = NULL;
 474 
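  // The very first block is written with an extra "HPROF BLOCKSIZE=<n>" string
  // (passed to the gzip function below, presumably as the gzip comment field) so
  // that consumers can discover the block size used for the rest of the dump.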
 475   if (_is_first) {
 476     char buf[128];
 477     jio_snprintf(buf, sizeof(buf), "HPROF BLOCKSIZE=" SIZE_FORMAT, _block_size);
 478     *compressed_size = gzip_func(in, in_size, out, out_size, tmp, tmp_size, _level, buf, &msg);
 479     _is_first = false;
 480   } else {
 481     *compressed_size = gzip_func(in, in_size, out, out_size, tmp, tmp_size, _level, NULL, &msg);
 482   }
 483 
 484   return msg;
 485 }
 486 
 487 
 488 // A writer for a file.
 489 class FileWriter : public AbstractWriter {
 490 private:
 491   char const* _path;
 492   int _fd;
 493 
 494 public:
 495   FileWriter(char const* path) : _path(path), _fd(-1) { }
 496 
 497   ~FileWriter();
 498 
 499   // Opens the writer. Returns NULL on success and a static error message otherwise.
 500   virtual char const* open_writer();
 501 
 502   // Does the write. Returns NULL on success and a static error message otherwise.
 503   virtual char const* write_buf(char* buf, ssize_t size);
 504 };
 505 
 506 char const* FileWriter::open_writer() {
 507   assert(_fd < 0, "Must not already be open");
 508 
 509   _fd = os::create_binary_file(_path, false);    // don't replace existing file
 510 
 511   if (_fd < 0) {
 512     return os::strerror(errno);
 513   }
 514 
 515   return NULL;
 516 }
 517 
 518 FileWriter::~FileWriter() {
 519   if (_fd >= 0) {
 520     os::close(_fd);
 521     _fd = -1;
 522   }
 523 }
 524 
 525 char const* FileWriter::write_buf(char* buf, ssize_t size) {
 526   assert(_fd >= 0, "Must be open");
 527   assert(size > 0, "Must write at least one byte");
 528 
 529   ssize_t n = (ssize_t) os::write(_fd, buf, (uint) size);
 530 
 531   if (n <= 0) {
 532     return os::strerror(errno);
 533   }
 534 
 535   return NULL;
 536 }
 537 
 538 // The data needed to write a single buffer (and compress it optionally).
 539 struct WriteWork {
 540   // The id of the work.
 541   int64_t id;
 542 
 543   // The input buffer where the raw data is stored.
 544   char* in;
 545   size_t in_used;
 546   size_t in_max;
 547 
 548   // The output buffer where the compressed data is. Is NULL when compression is disabled.
 549   char* out;
 550   size_t out_used;
 551   size_t out_max;
 552 
 553   // The temporary space needed for compression. Is NULL when compression is disabled.
 554   char* tmp;
 555   size_t tmp_max;
 556 
 557   // Used to link works into lists.
 558   WriteWork* _next;
 559   WriteWork* _prev;
 560 };
 561 
 562 // A list for works.
 563 class WorkList {
 564 private:
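  // Sentinel head of a circular, doubly-linked list; the list is empty exactly
  // when _head._next == &_head (see is_empty()).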
 565   WriteWork _head;
 566 
 567   void insert(WriteWork* before, WriteWork* work);
 568   WriteWork* remove(WriteWork* work);
 569 
 570 public:
 571   WorkList();
 572 
 573   // Return true if the list is empty.
 574   bool is_empty() { return _head._next == &_head; }
 575 
 576   // Adds to the beginning of the list.
 577   void add_first(WriteWork* work) { insert(&_head, work); }
 578 
 579   // Adds to the end of the list.
 580   void add_last(WriteWork* work) { insert(_head._prev, work); }
 581 
 583   // Adds the work so that the ids stay ordered.
 583   void add_by_id(WriteWork* work);
 584 
 585   // Returns the first element.
 586   WriteWork* first() { return is_empty() ? NULL : _head._next; }
 587 
 588   // Returns the last element.
 589   WriteWork* last() { return is_empty() ? NULL : _head._prev; }
 590 
 591   // Removes the first element. Returns NULL if empty.
 592   WriteWork* remove_first() { return remove(first()); }
 593 
 594   // Removes the last element. Returns NULL if empty.
 595   WriteWork* remove_last() { return remove(last()); }
 596 };
 597 
 598 WorkList::WorkList() {
 599   _head._next = &_head;
 600   _head._prev = &_head;
 601 }
 602 
 603 void WorkList::insert(WriteWork* before, WriteWork* work) {
 604   work->_prev = before;
 605   work->_next = before->_next;
 606   before->_next = work;
 607   work->_next->_prev = work;
 608 }
 609 
 610 WriteWork* WorkList::remove(WriteWork* work) {
 611   if (work != NULL) {
 612     assert(work->_next != work, "Invalid next");
 613     assert(work->_prev != work, "Invalid prev");
 614     work->_prev->_next = work->_next;
 615     work->_next->_prev = work->_prev;
 616     work->_next = NULL;
 617     work->_prev = NULL;
 618   }
 619 
 620   return work;
 621 }
 622 
 623 void WorkList::add_by_id(WriteWork* work) {
 624   if (is_empty()) {
 625     add_first(work);
 626   } else {
 627     WriteWork* last_curr = &_head;
 628     WriteWork* curr = _head._next;
 629 
 630     while (curr->id < work->id) {
 631       last_curr = curr;
 632       curr = curr->_next;
 633 
 634       if (curr == &_head) {
 635         add_last(work);
 636         return;
 637       }
 638     }
 639 
 640     insert(last_curr, work);
 641   }
 642 }
 643 
 644 // The backend used to write data (and optionally compress it).
 645 class CompressionBackend : StackObj {
 646   bool _active;
 647   char const * _err;
 648 
 649   int _nr_of_threads;
 650   int _works_created;
 651   bool _work_creation_failed;
 652 
 653   int64_t _id_to_write;
 654   int64_t _next_id;
 655 
 656   size_t _in_size;
 657   size_t _max_waste;
 658   size_t _out_size;
 659   size_t _tmp_size;
 660 
 661   size_t _written;
 662 
 663   AbstractWriter* _writer;
 664   AbstractCompressor* _compressor;
 665 
 666   Monitor* _lock;
 667 
 668   WriteWork* _current;
 669   WorkList _to_compress;
 670   WorkList _unused;
 671   WorkList _finished;
 672 
 673   void set_error(char const* new_error);
 674 
 675   WriteWork* allocate_work(size_t in_size, size_t out_size, size_t tmp_size);
 676   void free_work(WriteWork* work);
 677   void free_work_list(WorkList* list);
 678 
 679   WriteWork* get_work();
 680   void do_compress(WriteWork* work);
 681   void finish_work(WriteWork* work);
 682 
 683 public:
 684   // compressor can be NULL if no compression is used.
 685   // Takes ownership of the writer and compressor.
 686   // block_size is the buffer size of a WriteWork.
 687   // max_waste is the maximum number of bytes to leave
 688   // empty in the buffer when it is written.
 689   CompressionBackend(AbstractWriter* writer, AbstractCompressor* compressor,
 690                      size_t block_size, size_t max_waste);
 691 
 692   ~CompressionBackend();
 693 
 694   size_t get_written() const { return _written; }
 695 
 696   char const* error() const { return _err; }
 697 
 698   // Commits the old buffer and sets up a new one.
 699   void get_new_buffer(char** buffer, size_t* used, size_t* max);
 700 
 701   // The entry point for a worker thread. If single_run is true, we only handle one work entry.
 702   void thread_loop(bool single_run);
 703 
 704   // Shuts down the backend, releasing all threads.
 705   void deactivate();
 706 };
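// A sketch of the intended use (matching DumpWriter below): the client asks the
// backend for buffers, fills them, and commits each one by asking for the next.
//
//   CompressionBackend backend(new FileWriter(path), new GZipCompressor(level),
//                              block_size, max_waste);
//   char* buf; size_t used = 0; size_t max;
//   backend.get_new_buffer(&buf, &used, &max);  // get the first buffer
//   /* ... fill buf and advance 'used' ... */
//   backend.get_new_buffer(&buf, &used, &max);  // commit it, get a fresh buffer
//   backend.deactivate();                       // write what is left, stop workers
//
// Any helper threads call thread_loop(false) to compress and write the queued
// buffers; without helpers the backend compresses inline via thread_loop(true).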
 707 
 708 CompressionBackend::CompressionBackend(AbstractWriter* writer, AbstractCompressor* compressor,
 709                                        size_t block_size, size_t max_waste) :
 710   _active(false),
 711   _err(NULL),
 712   _nr_of_threads(0),
 713   _works_created(0),
 714   _work_creation_failed(false),
 715   _id_to_write(0),
 716   _next_id(0),
 717   _in_size(block_size),
 718   _max_waste(max_waste),
 719   _out_size(0),
 720   _tmp_size(0),
 721   _written(0),
 722   _writer(writer),
 723   _compressor(compressor),
 724   _lock(new (std::nothrow) PaddedMonitor(Mutex::leaf, "HProf Compression Backend",
 725         true, Mutex::_safepoint_check_never)) {
 726   if (_writer == NULL) {
 727     set_error("Could not allocate writer");
 728   } else if (_lock == NULL) {
 729     set_error("Could not allocate lock");
 730   } else {
 731     set_error(_writer->open_writer());
 732   }
 733 
 734   if (_compressor != NULL) {
 735     set_error(_compressor->init(_in_size, &_out_size, &_tmp_size));
 736   }
 737 
 738   _current = allocate_work(_in_size, _out_size, _tmp_size);
 739 
 740   if (_current == NULL) {
 741     set_error("Could not allocate memory for buffer");
 742   }
 743 
 744   _active = (_err == NULL);
 745 }
 746 
 747 CompressionBackend::~CompressionBackend() {
 748   assert(!_active, "Must not be active by now");
 749   assert(_nr_of_threads == 0, "Must have no active threads");
 750   assert(_to_compress.is_empty() && _finished.is_empty(), "Still work to do");
 751 
 752   free_work_list(&_unused);
 753   free_work(_current);
 754   assert(_works_created == 0, "All work must have been freed");
 755 
 756   delete _compressor;
 757   delete _writer;
 758   delete _lock;
 759 }
 760 
 761 void CompressionBackend::deactivate() {
 762   assert(_active, "Must be active");
 763 
 764   MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
 765 
 766   // Make sure we write a partially filled buffer.
 767   if ((_current != NULL) && (_current->in_used > 0)) {
 768     _current->id = _next_id++;
 769     _to_compress.add_last(_current);
 770     _current = NULL;
 771     ml.notify_all();
 772   }
 773 
 774   // Wait for the threads to drain the compression work list.
 775   while (!_to_compress.is_empty()) {
 776     // If we have no threads, compress the current work ourselves.
 777     if (_nr_of_threads == 0) {
 778       MutexUnlocker mu(_lock, Mutex::_no_safepoint_check_flag);
 779       thread_loop(true);
 780     } else {
 781       ml.wait();
 782     }
 783   }
 784 
 785   _active = false;
 786   ml.notify_all();
 787 }
 788 
 789 void CompressionBackend::thread_loop(bool single_run) {
 790   // Register if this is a worker thread.
 791   if (!single_run) {
 792     MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
 793     _nr_of_threads++;
 794   }
 795 
 796   while (true) {
 797     WriteWork* work = get_work();
 798 
 799     if (work == NULL) {
 800       assert(!single_run, "Should never happen for single thread");
 801       MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
 802       _nr_of_threads--;
 803       assert(_nr_of_threads >= 0, "Too many threads finished");
 804       ml.notify_all();
 805 
 806       return;
 807     } else {
 808       do_compress(work);
 809       finish_work(work);
 810     }
 811 
 812     if (single_run) {
 813       return;
 814     }
 815   }
 816 }
 817 
 818 void CompressionBackend::set_error(char const* new_error) {
 819   if ((new_error != NULL) && (_err == NULL)) {
 820     _err = new_error;
 821   }
 822 }
 823 
 824 WriteWork* CompressionBackend::allocate_work(size_t in_size, size_t out_size, size_t tmp_size) {
 825   WriteWork* result = (WriteWork*) os::malloc(sizeof(WriteWork), mtInternal);
 826 
 827   if (result == NULL) {
 828     _work_creation_failed = true;
 829     return NULL;
 830   }
 831 
 832   _works_created++;
 833   result->in = (char*) os::malloc(in_size, mtInternal);
 834   result->in_max = in_size;
 835   result->in_used = 0;
 836   result->out = NULL;
 837   result->tmp = NULL;
 838 
 839   if (result->in == NULL) {
 840     goto fail;
 841   }
 842 
 843   if (out_size > 0) {
 844     result->out = (char*) os::malloc(out_size, mtInternal);
 845     result->out_used = 0;
 846     result->out_max = out_size;
 847 
 848     if (result->out == NULL) {
 849       goto fail;
 850     }
 851   }
 852 
 853   if (tmp_size > 0) {
 854     result->tmp = (char*) os::malloc(tmp_size, mtInternal);
 855     result->tmp_max = tmp_size;
 856 
 857     if (result->tmp == NULL) {
 858       goto fail;
 859     }
 860   }
 861 
 862   return result;
 863 
 864 fail:
 865   free_work(result);
 866   _work_creation_failed = true;
 867   return NULL;
 868 }
 869 
 870 void CompressionBackend::free_work(WriteWork* work) {
 871   if (work != NULL) {
 872     os::free(work->in);
 873     os::free(work->out);
 874     os::free(work->tmp);
 875     os::free(work);
 876     --_works_created;
 877   }
 878 }
 879 
 880 void CompressionBackend::free_work_list(WorkList* list) {
 881   while (!list->is_empty()) {
 882     free_work(list->remove_first());
 883   }
 884 }
 885 
 886 WriteWork* CompressionBackend::get_work() {
 887   MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
 888 
 889   while (_active && _to_compress.is_empty()) {
 890     ml.wait();
 891   }
 892 
 893   return _to_compress.remove_first();
 894 }
 895 
 896 void CompressionBackend::get_new_buffer(char** buffer, size_t* used, size_t* max) {
 897   if (_active) {
 898     MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
 899 
 900     if (*used > 0) {
 901       _current->in_used += *used;
 902 
 903       // Check if the unused rest of the buffer is no more than _max_waste. If so,
 904       // write the buffer. Otherwise return the rest of the buffer as the new buffer.
 905       if (_current->in_max - _current->in_used <= _max_waste) {
 906         _current->id = _next_id++;
 907         _to_compress.add_last(_current);
 908         _current = NULL;
 909         ml.notify_all();
 910       } else {
 911         *buffer = _current->in + _current->in_used;
 912         *used = 0;
 913         *max = _current->in_max - _current->in_used;
 914 
 915         return;
 916       }
 917     }
 918 
 919     while ((_current == NULL) && _unused.is_empty() && _active) {
 920       // Add more work objects if needed.
 921       if (!_work_creation_failed && (_works_created <= _nr_of_threads)) {
 922         WriteWork* work = allocate_work(_in_size, _out_size, _tmp_size);
 923 
 924         if (work != NULL) {
 925           _unused.add_first(work);
 926         }
 927       } else if (!_to_compress.is_empty() && (_nr_of_threads == 0)) {
 928         // If we have no threads, compress the current work ourselves.
 929         MutexUnlocker mu(_lock, Mutex::_no_safepoint_check_flag);
 930         thread_loop(true);
 931       } else {
 932         ml.wait();
 933       }
 934     }
 935 
 936     if (_current == NULL) {
 937       _current = _unused.remove_first();
 938     }
 939 
 940     if (_current != NULL) {
 941       _current->in_used = 0;
 942       _current->out_used = 0;
 943       *buffer = _current->in;
 944       *used = 0;
 945       *max = _current->in_max;
 946 
 947       return;
 948     }
 949   }
 950 
 951   *buffer = NULL;
 952   *used = 0;
 953   *max = 0;
 954 
 955   return;
 956 }
 957 
 958 void CompressionBackend::do_compress(WriteWork* work) {
 959   if (_compressor != NULL) {
 960     char const* msg = _compressor->compress(work->in, work->in_used, work->out, work->out_max,
 961                                             work->tmp, _tmp_size, &work->out_used);
 962 
 963     if (msg != NULL) {
 964       MutexLocker ml(_lock, Mutex::_no_safepoint_check_flag);
 965       set_error(msg);
 966     }
 967   }
 968 }
 969 
 970 void CompressionBackend::finish_work(WriteWork* work) {
 971   MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
 972 
 973   _finished.add_by_id(work);
 974 
 975   // Write all finished works as far as we can.
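  // Works can finish compression out of order; _id_to_write gates the loop so
  // buffers reach the writer in the order they were filled.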
 976   while (!_finished.is_empty() && (_finished.first()->id == _id_to_write)) {
 977     WriteWork* to_write = _finished.remove_first();
 978     size_t left = _compressor == NULL ? to_write->in_used : to_write->out_used;
 979     char* p = _compressor == NULL ? to_write->in : to_write->out;
 980     char const* msg = NULL;
 981 
 982     if (_err == NULL) {
 983       _written += left;
 984       MutexUnlocker mu(_lock, Mutex::_no_safepoint_check_flag);
 985       msg = _writer->write_buf(p, (ssize_t) left);
 986     }
 987 
 988     set_error(msg);
 989     _unused.add_first(to_write);
 990     _id_to_write++;
 991   }
 992 
 993   ml.notify_all();
 994 }
 995 
 996 
 997 class DumpWriter : public StackObj {
 998  private:
 999   enum {
1000     io_buffer_max_size = 1*M,
1001     io_buffer_max_waste = 10*K,
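        // 1-byte tag + 4-byte timestamp + 4-byte length (see start_sub_record()).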
1002     dump_segment_header_size = 9
1003   };
1004 
1005   char* _buffer;    // internal buffer
1006   size_t _size;
1007   size_t _pos;
1008 
1009   bool _in_dump_segment; // Are we currently in a dump segment?
1010   bool _is_huge_sub_record; // Are we writing a sub-record larger than the buffer size?
1011   DEBUG_ONLY(size_t _sub_record_left;) // The bytes not written for the current sub-record.
1012   DEBUG_ONLY(bool _sub_record_ended;) // True if end_sub_record() has been called.
1013 
1014   CompressionBackend _backend; // Does the actual writing.
1015 
1016   void flush();
1017 
1018   char* buffer() const                          { return _buffer; }
1019   size_t buffer_size() const                    { return _size; }
1020   size_t position() const                       { return _pos; }
1021   void set_position(size_t pos)                 { _pos = pos; }
1022 
1023   // Can be called if we have enough room in the buffer.
1024   void write_fast(void* s, size_t len);
1025 
1026   // Returns true if we have enough room in the buffer for 'len' bytes.
1027   bool can_write_fast(size_t len);
1028 
1029  public:
1030   // Takes ownership of the writer and compressor.
1031   DumpWriter(AbstractWriter* writer, AbstractCompressor* compressor);
1032 
1033   ~DumpWriter();
1034 
1035   // total number of bytes written to the disk
1036   julong bytes_written() const          { return (julong) _backend.get_written(); }
1037 
1038   char const* error() const             { return _backend.error(); }
1039 
1040   // writer functions
1041   void write_raw(void* s, size_t len);
1042   void write_u1(u1 x);
1043   void write_u2(u2 x);
1044   void write_u4(u4 x);
1045   void write_u8(u8 x);
1046   void write_objectID(oop o);
1047   void write_symbolID(Symbol* o);
1048   void write_classID(Klass* k);
1049   void write_id(u4 x);
1050 
1051   // Start a new sub-record. Starts a new heap dump segment if needed.
1052   void start_sub_record(u1 tag, u4 len);
1053   // Ends the current sub-record.
1054   void end_sub_record();
1055   // Finishes the current dump segment if not already finished.
1056   void finish_dump_segment();

1057 
1058   // Called by threads used for parallel writing.
1059   void writer_loop()                    { _backend.thread_loop(false); }
1060   // Called when finished to release the threads.
1061   void deactivate()                     { _backend.deactivate(); }
1062 };
1063 
1064 DumpWriter::DumpWriter(AbstractWriter* writer, AbstractCompressor* compressor) :
1065   _buffer(NULL),
1066   _size(0),
1067   _pos(0),
1068   _in_dump_segment(false),
1069   _backend(writer, compressor, io_buffer_max_size, io_buffer_max_waste) {
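  // flush() with an empty buffer just fetches the first buffer from the backend.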
1070   flush();
1071 }
1072 
1073 DumpWriter::~DumpWriter() {
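  // Hand the remaining bytes of the current buffer over to the backend.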
1074   flush();
1075 }
1076 
1077 void DumpWriter::write_fast(void* s, size_t len) {
1078   assert(!_in_dump_segment || (_sub_record_left >= len), "sub-record too large");
1079   assert(buffer_size() - position() >= len, "Must fit");
1080   debug_only(_sub_record_left -= len);
1081 
1082   memcpy(buffer() + position(), s, len);
1083   set_position(position() + len);
1084 }
1085 
1086 bool DumpWriter::can_write_fast(size_t len) {
1087   return buffer_size() - position() >= len;
1088 }
1089 
1090 // write raw bytes
1091 void DumpWriter::write_raw(void* s, size_t len) {
1092   assert(!_in_dump_segment || (_sub_record_left >= len), "sub-record too large");
1093   debug_only(_sub_record_left -= len);
1094 
1095   // flush buffer to make room.
1096   while (len > buffer_size() - position()) {
1097     assert(!_in_dump_segment || _is_huge_sub_record, "Cannot overflow in non-huge sub-record.");
1098 
1099     size_t to_write = buffer_size() - position();
1100     memcpy(buffer() + position(), s, to_write);
1101     s = (void*) ((char*) s + to_write);
1102     len -= to_write;
1103     set_position(position() + to_write);
1104     flush();
1105   }
1106 
1107   memcpy(buffer() + position(), s, len);
1108   set_position(position() + len);
1109 }
1110 
1111 // flush any buffered bytes to the file
1112 void DumpWriter::flush() {
1113   _backend.get_new_buffer(&_buffer, &_pos, &_size);
1114 }
1115 
1116 // Makes sure we inline the fast write into the write_u* functions. This is a big speedup.
1117 #define WRITE_KNOWN_TYPE(p, len) do { if (can_write_fast((len))) write_fast((p), (len)); \
1118                                       else write_raw((p), (len)); } while (0)
1119 
1120 void DumpWriter::write_u1(u1 x) {
1121   WRITE_KNOWN_TYPE((void*) &x, 1);
1122 }
1123 
1124 void DumpWriter::write_u2(u2 x) {
1125   u2 v;
1126   Bytes::put_Java_u2((address)&v, x);
1127   WRITE_KNOWN_TYPE((void*)&v, 2);
1128 }
1129 
1130 void DumpWriter::write_u4(u4 x) {
1131   u4 v;
1132   Bytes::put_Java_u4((address)&v, x);
1133   WRITE_KNOWN_TYPE((void*)&v, 4);
1134 }
1135 
1136 void DumpWriter::write_u8(u8 x) {
1137   u8 v;
1138   Bytes::put_Java_u8((address)&v, x);
1139   WRITE_KNOWN_TYPE((void*)&v, 8);
1140 }
1141 
1142 void DumpWriter::write_objectID(oop o) {
1143   address a = cast_from_oop<address>(o);
1144 #ifdef _LP64
1145   write_u8((u8)a);
1146 #else
1147   write_u4((u4)a);
1148 #endif
1149 }
1150 
1151 void DumpWriter::write_symbolID(Symbol* s) {
1152   address a = (address)((uintptr_t)s);
1153 #ifdef _LP64
1154   write_u8((u8)a);
1155 #else
1156   write_u4((u4)a);
1157 #endif
1158 }
1159 


1174   if (_in_dump_segment) {
1175     assert(_sub_record_left == 0, "Last sub-record not written completely");
1176     assert(_sub_record_ended, "sub-record must have ended");
1177 
1178     // Fix up the dump segment length if we haven't written a huge sub-record last
1179     // (in which case the segment length was already set to the correct value initially).
1180     if (!_is_huge_sub_record) {
1181       assert(position() > dump_segment_header_size, "Dump segment should have some content");
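      // The 4-byte segment length field sits at offset 5, right after the
      // 1-byte tag and the 4-byte timestamp written in start_sub_record().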
1182       Bytes::put_Java_u4((address) (buffer() + 5), (u4) (position() - dump_segment_header_size));
1183     }
1184 
1185     flush();
1186     _in_dump_segment = false;
1187   }
1188 }
1189 
1190 void DumpWriter::start_sub_record(u1 tag, u4 len) {
1191   if (!_in_dump_segment) {
1192     if (position() > 0) {
1193       flush();

1194     }
1195 
1196     assert(position() == 0, "Must be at the start");
1197 
1198     write_u1(HPROF_HEAP_DUMP_SEGMENT);
1199     write_u4(0); // timestamp
1200     // Will be fixed up later if we add more sub-records.  If this is a huge sub-record,
1201     // this is already the correct length, since we don't add more sub-records.
1202     write_u4(len);
1203     _in_dump_segment = true;
1204     _is_huge_sub_record = len > buffer_size() - dump_segment_header_size;
1205   } else if (_is_huge_sub_record || (len > buffer_size() - position())) {
1206     // This object will not completely fit or the last sub-record was huge.
1207     // Finish the current segment and try again.
1208     finish_dump_segment();
1209     start_sub_record(tag, len);
1210 
1211     return;
1212   }
1213 
1214   debug_only(_sub_record_left = len);
1215   debug_only(_sub_record_ended = false);
1216 
1217   write_u1(tag);


2069   }
2070 
2071   if (DumperSupport::mask_dormant_archived_object(o) == NULL) {
2072     log_debug(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s)", p2i(o), o->klass()->external_name());
2073     return;
2074   }
2075 
2076   if (o->is_instance()) {
2077     // create a HPROF_GC_INSTANCE record for each object
2078     DumperSupport::dump_instance(writer(), o);
2079   } else if (o->is_objArray()) {
2080     // create a HPROF_GC_OBJ_ARRAY_DUMP record for each object array
2081     DumperSupport::dump_object_array(writer(), objArrayOop(o));
2082   } else if (o->is_typeArray()) {
2083     // create a HPROF_GC_PRIM_ARRAY_DUMP record for each type array
2084     DumperSupport::dump_prim_array(writer(), typeArrayOop(o));
2085   }
2086 }
2087 
2088 // The VM operation that performs the heap dump
2089 class VM_HeapDumper : public VM_GC_Operation, public AbstractGangTask {
2090  private:
2091   static VM_HeapDumper* _global_dumper;
2092   static DumpWriter*    _global_writer;
2093   DumpWriter*           _local_writer;
2094   JavaThread*           _oome_thread;
2095   Method*               _oome_constructor;
2096   bool _gc_before_heap_dump;
2097   GrowableArray<Klass*>* _klass_map;
2098   ThreadStackTrace** _stack_traces;
2099   int _num_threads;
2100 
2101   // accessors and setters
2102   static VM_HeapDumper* dumper()         {  assert(_global_dumper != NULL, "Error"); return _global_dumper; }
2103   static DumpWriter* writer()            {  assert(_global_writer != NULL, "Error"); return _global_writer; }
2104   void set_global_dumper() {
2105     assert(_global_dumper == NULL, "Error");
2106     _global_dumper = this;
2107   }
2108   void set_global_writer() {
2109     assert(_global_writer == NULL, "Error");


2125   // writes a HPROF_GC_CLASS_DUMP record for a given basic type
2126   // array (and each multi-dimensional array too)
2127   static void do_basic_type_array_class_dump(Klass* k);
2128 
2129   // HPROF_GC_ROOT_THREAD_OBJ records
2130   int do_thread(JavaThread* thread, u4 thread_serial_num);
2131   void do_threads();
2132 
2133   void add_class_serial_number(Klass* k, int serial_num) {
2134     _klass_map->at_put_grow(serial_num, k);
2135   }
2136 
2137   // HPROF_TRACE and HPROF_FRAME records
2138   void dump_stack_traces();
2139 
2140  public:
2141   VM_HeapDumper(DumpWriter* writer, bool gc_before_heap_dump, bool oome) :
2142     VM_GC_Operation(0 /* total collections,      dummy, ignored */,
2143                     GCCause::_heap_dump /* GC Cause */,
2144                     0 /* total full collections, dummy, ignored */,
2145                     gc_before_heap_dump),
2146     AbstractGangTask("dump heap") {
2147     _local_writer = writer;
2148     _gc_before_heap_dump = gc_before_heap_dump;
2149     _klass_map = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Klass*>(INITIAL_CLASS_COUNT, true);
2150     _stack_traces = NULL;
2151     _num_threads = 0;
2152     if (oome) {
2153       assert(!Thread::current()->is_VM_thread(), "Dump from OutOfMemoryError cannot be called by the VMThread");
2154       // get OutOfMemoryError zero-parameter constructor
2155       InstanceKlass* oome_ik = SystemDictionary::OutOfMemoryError_klass();
2156       _oome_constructor = oome_ik->find_method(vmSymbols::object_initializer_name(),
2157                                                           vmSymbols::void_method_signature());
2158       // get thread throwing OOME when generating the heap dump at OOME
2159       _oome_thread = JavaThread::current();
2160     } else {
2161       _oome_thread = NULL;
2162       _oome_constructor = NULL;
2163     }
2164   }
2165   ~VM_HeapDumper() {
2166     if (_stack_traces != NULL) {
2167       for (int i=0; i < _num_threads; i++) {
2168         delete _stack_traces[i];
2169       }
2170       FREE_C_HEAP_ARRAY(ThreadStackTrace*, _stack_traces);
2171     }
2172     delete _klass_map;
2173   }
2174 
2175   VMOp_Type type() const { return VMOp_HeapDumper; }
2176   void doit();
2177   void work(uint worker_id);
2178 };
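     // Usage sketch (see HeapDumper::dump() below): construct the operation
     // with a DumpWriter, then either call doit() directly when already
     // running in the VM thread at a safepoint, or hand it to the VM thread
     // via VMThread::execute(&dumper).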
2179 
2180 
2181 VM_HeapDumper* VM_HeapDumper::_global_dumper = NULL;
2182 DumpWriter*    VM_HeapDumper::_global_writer = NULL;
2183 
2184 bool VM_HeapDumper::skip_operation() const {
2185   return false;
2186 }
2187 
2188 // fixes up the current dump segment and writes the HPROF_HEAP_DUMP_END record
2189 void DumperSupport::end_of_dump(DumpWriter* writer) {
2190   writer->finish_dump_segment();
2191 
2192   writer->write_u1(HPROF_HEAP_DUMP_END);
2193   writer->write_u4(0);
2194   writer->write_u4(0);
2195 }
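     // The two zero u4 fields complete the standard HPROF record header
     // (u1 tag, u4 time offset, u4 body length); HPROF_HEAP_DUMP_END has
     // an empty body.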
2196 
2197 // writes a HPROF_LOAD_CLASS record for the class (and each of its
2198 // array classes)
2199 void VM_HeapDumper::do_load_class(Klass* k) {
2200   static u4 class_serial_num = 0;


2389 
2390   HandleMark hm;
2391   CollectedHeap* ch = Universe::heap();
2392 
2393   ch->ensure_parsability(false); // must happen, even if collection does
2394                                  // not happen (e.g. due to GCLocker)
2395 
2396   if (_gc_before_heap_dump) {
2397     if (GCLocker::is_active()) {
2398       warning("GC locker is held; pre-heapdump GC was skipped");
2399     } else {
2400       ch->collect_as_vm_thread(GCCause::_heap_dump);
2401     }
2402   }
2403 
2404   // At this point we should be the only dumper active, so
2405   // the following should be safe.
2406   set_global_dumper();
2407   set_global_writer();
2408 
2409   WorkGang* gang = UseShenandoahGC ? NULL : ch->get_safepoint_workers();
2410 
2411   if (gang == NULL) {
2412     work(0);
2413   } else {
2414     gang->run_task(this);
2415   }
2416 
2417   // Now we clear the global variables, so that a future dumper can run.
2418   clear_global_dumper();
2419   clear_global_writer();
2420 }
2421 
2422 void VM_HeapDumper::work(uint worker_id) {
2423   if (worker_id != 0) {
2424     writer()->writer_loop();
2425     return;
2426   }
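       // Only worker 0 generates the dump content; any other safepoint
       // workers stay in writer_loop() above, presumably servicing the
       // DumpWriter's buffered (and, when a compressor is set, gzipped)
       // I/O until deactivate() is called at the end of the dump.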
2427 
2428   // Write the file header - we always use 1.0.2
2429   const char* header = "JAVA PROFILE 1.0.2";
2430 
2431   // header is a few bytes long - no chance to overflow int
2432   writer()->write_raw((void*)header, (int)strlen(header));
2433   writer()->write_u1(0); // terminator
2434   writer()->write_u4(oopSize);
2435   // timestamp is current time in ms
2436   writer()->write_u8(os::javaTimeMillis());
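       // The writes above form the HPROF file header: a NUL-terminated
       // version string, a u4 identifier size (oopSize here) and a u8
       // millisecond timestamp.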
2437 
2438   // HPROF_UTF8 records
2439   SymbolTableDumper sym_dumper(writer());
2440   SymbolTable::symbols_do(&sym_dumper);
2441 
2442   // write HPROF_LOAD_CLASS records
2443   {
2444     LockedClassesDo locked_load_classes(&do_load_class);
2445     ClassLoaderDataGraph::classes_do(&locked_load_classes);
2446   }
2447   Universe::basic_type_classes_do(&do_load_class);
2448 


2471 
2472   // HPROF_GC_ROOT_MONITOR_USED
2473   MonitorUsedDumper mon_dumper(writer());
2474   ObjectSynchronizer::oops_do(&mon_dumper);
2475 
2476   // HPROF_GC_ROOT_JNI_GLOBAL
2477   JNIGlobalsDumper jni_dumper(writer());
2478   JNIHandles::oops_do(&jni_dumper);
2479   Universe::oops_do(&jni_dumper);  // technically not jni roots, but global roots
2480                                    // for things like preallocated throwable backtraces
2481 
2482   // HPROF_GC_ROOT_STICKY_CLASS
2483   // These should be classes in the NULL class loader data, and not all classes
2484   // if !ClassUnloading
2485   StickyClassDumper class_dumper(writer());
2486   ClassLoaderData::the_null_class_loader_data()->classes_do(&class_dumper);
2487 
2488   // Writes the HPROF_HEAP_DUMP_END record.
2489   DumperSupport::end_of_dump(writer());
2490 
2491   // We are done with writing. Release the worker threads.
2492   writer()->deactivate();

2493 }
2494 
2495 void VM_HeapDumper::dump_stack_traces() {
2496   // write a HPROF_TRACE record without any frames to be referenced as object alloc sites
2497   DumperSupport::write_header(writer(), HPROF_TRACE, 3*sizeof(u4));
2498   writer()->write_u4((u4) STACK_TRACE_ID);
2499   writer()->write_u4(0);                    // thread number
2500   writer()->write_u4(0);                    // frame count
2501 
2502   _stack_traces = NEW_C_HEAP_ARRAY(ThreadStackTrace*, Threads::number_of_threads(), mtInternal);
2503   int frame_serial_num = 0;
2504   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
2505     oop threadObj = thread->threadObj();
2506     if (threadObj != NULL && !thread->is_exiting() && !thread->is_hidden_from_external_view()) {
2507       // dump thread stack trace
2508       ResourceMark rm;
2509       ThreadStackTrace* stack_trace = new ThreadStackTrace(thread, false);
2510       stack_trace->dump_stack_at_safepoint(-1);
2511       _stack_traces[_num_threads++] = stack_trace;
2512 
2513       // write HPROF_FRAME records for this thread's stack trace
2514       int depth = stack_trace->get_stack_depth();
2515       int thread_frame_start = frame_serial_num;
2516       int extra_frames = 0;
2517       // write a fake frame that makes it look like the thread that caused
2518       // the OOME is in the OutOfMemoryError zero-parameter constructor
2519       if (thread == _oome_thread && _oome_constructor != NULL) {
2520         int oome_serial_num = _klass_map->find(_oome_constructor->method_holder());
2521         // the class serial number starts from 1
2522         assert(oome_serial_num > 0, "OutOfMemoryError class not found");
2523         DumperSupport::dump_stack_frame(writer(), ++frame_serial_num, oome_serial_num,
2524                                         _oome_constructor, 0);
2525         extra_frames++;
2526       }
2527       for (int j=0; j < depth; j++) {
2528         StackFrameInfo* frame = stack_trace->stack_frame_at(j);


2531         // the class serial number starts from 1
2532         assert(class_serial_num > 0, "class not found");
2533         DumperSupport::dump_stack_frame(writer(), ++frame_serial_num, class_serial_num, m, frame->bci());
2534       }
2535       depth += extra_frames;
2536 
2537       // write HPROF_TRACE record for one thread
2538       DumperSupport::write_header(writer(), HPROF_TRACE, 3*sizeof(u4) + depth*oopSize);
2539       int stack_serial_num = _num_threads + STACK_TRACE_ID;
2540       writer()->write_u4(stack_serial_num);      // stack trace serial number
2541       writer()->write_u4((u4) _num_threads);     // thread serial number
2542       writer()->write_u4(depth);                 // frame count
2543       for (int j=1; j <= depth; j++) {
2544         writer()->write_id(thread_frame_start + j);
2545       }
2546     }
2547   }
2548 }
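     // Serial numbering: thread serial numbers are 1-based (_num_threads
     // after the increment) and each thread's stack trace serial is offset
     // by STACK_TRACE_ID, so serial 1 stays reserved for the dummy
     // HPROF_TRACE record written at the top of this function.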
2549 
2550 // dumps the heap to the given path.
2551 int HeapDumper::dump(const char* path, outputStream* out, int compression) {
2552   assert(path != NULL && strlen(path) > 0, "path missing");
2553 
2554   // print message in interactive case
2555   if (out != NULL) {
2556     out->print_cr("Dumping heap to %s ...", path);
2557     timer()->start();
2558   }
2559 
2560   // create JFR event
2561   EventHeapDump event;
2562 
2563   AbstractCompressor* compressor = NULL;
2564 
2565   if (compression > 0) {
2566     compressor = new (std::nothrow) GZipCompressor(compression);
2567 
2568     if (compressor == NULL) {
2569       set_error("Could not allocate gzip compressor");
2570       return -1;
2571     }
2572   }
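       // With this change a compressed dump can be requested via jcmd; the
       // exact option spelling is part of this review, but roughly
       // (hypothetical invocation):
       //   jcmd <pid> GC.heap_dump -gz=<level> heap.hprof.gz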
2573 
2574   DumpWriter writer(new (std::nothrow) FileWriter(path), compressor);
2575 
2576   if (writer.error() != NULL) {
2577     set_error(writer.error());
2578     if (out != NULL) {
2579       out->print_cr("Unable to create %s: %s", path,
2580         (error() != NULL) ? error() : "reason unknown");
2581     }
2582     return -1;
2583   }
2584 
2585   // generate the dump
2586   VM_HeapDumper dumper(&writer, _gc_before_heap_dump, _oome);
2587   if (Thread::current()->is_VM_thread()) {
2588     assert(SafepointSynchronize::is_at_safepoint(), "Expected to be called at a safepoint");
2589     dumper.doit();
2590   } else {
2591     VMThread::execute(&dumper);
2592   }
2593 
2594   // record any error that the writer may have encountered

2595   set_error(writer.error());
2596 
2597   // emit JFR event
2598   if (error() == NULL) {
2599     event.set_destination(path);
2600     event.set_gcBeforeDump(_gc_before_heap_dump);
2601     event.set_size(writer.bytes_written());
2602     event.set_onOutOfMemoryError(_oome);
2603     event.commit();
2604   }
2605 
2606   // print message in interactive case
2607   if (out != NULL) {
2608     timer()->stop();
2609     if (error() == NULL) {
2610       out->print_cr("Heap dump file created [" JULONG_FORMAT " bytes in %3.3f secs]",
2611                     writer.bytes_written(), timer()->seconds());
2612     } else {
2613       out->print_cr("Dump file is incomplete: %s", writer.error());
2614     }


2621 HeapDumper::~HeapDumper() {
2622   if (timer()->is_active()) {
2623     timer()->stop();
2624   }
2625   set_error(NULL);
2626 }
2627 
2628 
2629 // returns the error string (resource allocated), or NULL
2630 char* HeapDumper::error_as_C_string() const {
2631   if (error() != NULL) {
2632     char* str = NEW_RESOURCE_ARRAY(char, strlen(error())+1);
2633     strcpy(str, error());
2634     return str;
2635   } else {
2636     return NULL;
2637   }
2638 }
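     // Note: the returned string lives in the calling thread's resource
     // area, so a ResourceMark must be active in the caller's scope.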
2639 
2640 // set the error string
2641 void HeapDumper::set_error(char const* error) {
2642   if (_error != NULL) {
2643     os::free(_error);
2644   }
2645   if (error == NULL) {
2646     _error = NULL;
2647   } else {
2648     _error = os::strdup(error);
2649     assert(_error != NULL, "allocation failure");
2650   }
2651 }
2652 
2653 // Called by out-of-memory error reporting by a single Java thread
2654 // outside of a JVM safepoint
2655 void HeapDumper::dump_heap_from_oome() {
2656   HeapDumper::dump_heap(true);
2657 }
2658 
2659 // Called by error reporting by a single Java thread outside of a JVM safepoint,
2660 // or by heap dumping by the VM thread during a (GC) safepoint. Thus, these various
2661 // callers are strictly serialized and guaranteed not to interfere below. For more

