1 /*
   2  * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "jvm.h"
  27 #include "classfile/classLoaderData.inline.hpp"
  28 #include "classfile/classLoaderDataGraph.hpp"
  29 #include "classfile/javaClasses.inline.hpp"
  30 #include "classfile/symbolTable.hpp"
  31 #include "classfile/systemDictionary.hpp"
  32 #include "classfile/vmSymbols.hpp"
  33 #include "gc/shared/gcLocker.hpp"
  34 #include "gc/shared/gcVMOperations.hpp"
  35 #include "gc/shared/workgroup.hpp"
  36 #include "jfr/jfrEvents.hpp"
  37 #include "memory/allocation.inline.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "memory/universe.hpp"
  40 #include "oops/objArrayKlass.hpp"
  41 #include "oops/objArrayOop.inline.hpp"
  42 #include "oops/oop.inline.hpp"
  43 #include "oops/typeArrayOop.inline.hpp"
  44 #include "runtime/frame.inline.hpp"
  45 #include "runtime/handles.inline.hpp"
  46 #include "runtime/javaCalls.hpp"
  47 #include "runtime/jniHandles.hpp"
  48 #include "runtime/os.inline.hpp"
  49 #include "runtime/reflectionUtils.hpp"
  50 #include "runtime/thread.inline.hpp"
  51 #include "runtime/threadSMR.hpp"
  52 #include "runtime/vframe.hpp"
  53 #include "runtime/vmThread.hpp"
  54 #include "runtime/vmOperations.hpp"
  55 #include "services/heapDumper.hpp"
  56 #include "services/threadService.hpp"
  57 #include "utilities/macros.hpp"
  58 #include "utilities/ostream.hpp"
  59 
  60 /*
  61  * HPROF binary format - description copied from:
  62  *   src/share/demo/jvmti/hprof/hprof_io.c
  63  *
  64  *
  65  *  header    "JAVA PROFILE 1.0.2" (0-terminated)
  66  *
  67  *  u4        size of identifiers. Identifiers are used to represent
  68  *            UTF8 strings, objects, stack traces, etc. They usually
  69  *            have the same size as host pointers. For example, on
  70  *            Solaris and Win32, the size is 4.
  71  * u4         high word
  72  * u4         low word    number of milliseconds since 0:00 GMT, 1/1/70
  73  * [record]*  a sequence of records.
  74  *
  75  *
  76  * Record format:
  77  *
  78  * u1         a TAG denoting the type of the record
  79  * u4         number of *microseconds* since the time stamp in the
  80  *            header. (wraps around in a little more than an hour)
  81  * u4         number of bytes *remaining* in the record. Note that
  82  *            this number excludes the tag and the length field itself.
  83  * [u1]*      BODY of the record (a sequence of bytes)
  84  *
  85  *
  86  * The following TAGs are supported:
  87  *
  88  * TAG           BODY       notes
  89  *----------------------------------------------------------
  90  * HPROF_UTF8               a UTF8-encoded name
  91  *
  92  *               id         name ID
  93  *               [u1]*      UTF8 characters (no trailing zero)
  94  *
  95  * HPROF_LOAD_CLASS         a newly loaded class
  96  *
  97  *                u4        class serial number (> 0)
  98  *                id        class object ID
  99  *                u4        stack trace serial number
 100  *                id        class name ID
 101  *
 102  * HPROF_UNLOAD_CLASS       an unloading class
 103  *
 104  *                u4        class serial_number
 105  *
 106  * HPROF_FRAME              a Java stack frame
 107  *
 108  *                id        stack frame ID
 109  *                id        method name ID
 110  *                id        method signature ID
 111  *                id        source file name ID
 112  *                u4        class serial number
 113  *                i4        line number. >0: normal
 114  *                                       -1: unknown
 115  *                                       -2: compiled method
 116  *                                       -3: native method
 117  *
 118  * HPROF_TRACE              a Java stack trace
 119  *
 120  *               u4         stack trace serial number
 121  *               u4         thread serial number
 122  *               u4         number of frames
 123  *               [id]*      stack frame IDs
 124  *
 125  *
 126  * HPROF_ALLOC_SITES        a set of heap allocation sites, obtained after GC
 127  *
 128  *               u2         flags 0x0001: incremental vs. complete
 129  *                                0x0002: sorted by allocation vs. live
 130  *                                0x0004: whether to force a GC
 131  *               u4         cutoff ratio
 132  *               u4         total live bytes
 133  *               u4         total live instances
 134  *               u8         total bytes allocated
 135  *               u8         total instances allocated
 136  *               u4         number of sites that follow
 137  *               [u1        is_array: 0:  normal object
 138  *                                    2:  object array
 139  *                                    4:  boolean array
 140  *                                    5:  char array
 141  *                                    6:  float array
 142  *                                    7:  double array
 143  *                                    8:  byte array
 144  *                                    9:  short array
 145  *                                    10: int array
 146  *                                    11: long array
 147  *                u4        class serial number (may be zero during startup)
 148  *                u4        stack trace serial number
 149  *                u4        number of bytes alive
 150  *                u4        number of instances alive
 151  *                u4        number of bytes allocated
 *                u4]*      number of instances allocated
 153  *
 154  * HPROF_START_THREAD       a newly started thread.
 155  *
 156  *               u4         thread serial number (> 0)
 157  *               id         thread object ID
 158  *               u4         stack trace serial number
 159  *               id         thread name ID
 160  *               id         thread group name ID
 161  *               id         thread group parent name ID
 162  *
 163  * HPROF_END_THREAD         a terminating thread.
 164  *
 165  *               u4         thread serial number
 166  *
 167  * HPROF_HEAP_SUMMARY       heap summary
 168  *
 169  *               u4         total live bytes
 170  *               u4         total live instances
 171  *               u8         total bytes allocated
 172  *               u8         total instances allocated
 173  *
 174  * HPROF_HEAP_DUMP          denote a heap dump
 175  *
 176  *               [heap dump sub-records]*
 177  *
 178  *                          There are four kinds of heap dump sub-records:
 179  *
 180  *               u1         sub-record type
 181  *
 182  *               HPROF_GC_ROOT_UNKNOWN         unknown root
 183  *
 184  *                          id         object ID
 185  *
 186  *               HPROF_GC_ROOT_THREAD_OBJ      thread object
 187  *
 188  *                          id         thread object ID  (may be 0 for a
 189  *                                     thread newly attached through JNI)
 190  *                          u4         thread sequence number
 191  *                          u4         stack trace sequence number
 192  *
 193  *               HPROF_GC_ROOT_JNI_GLOBAL      JNI global ref root
 194  *
 195  *                          id         object ID
 196  *                          id         JNI global ref ID
 197  *
 198  *               HPROF_GC_ROOT_JNI_LOCAL       JNI local ref
 199  *
 200  *                          id         object ID
 201  *                          u4         thread serial number
 202  *                          u4         frame # in stack trace (-1 for empty)
 203  *
 204  *               HPROF_GC_ROOT_JAVA_FRAME      Java stack frame
 205  *
 206  *                          id         object ID
 207  *                          u4         thread serial number
 208  *                          u4         frame # in stack trace (-1 for empty)
 209  *
 210  *               HPROF_GC_ROOT_NATIVE_STACK    Native stack
 211  *
 212  *                          id         object ID
 213  *                          u4         thread serial number
 214  *
 215  *               HPROF_GC_ROOT_STICKY_CLASS    System class
 216  *
 217  *                          id         object ID
 218  *
 219  *               HPROF_GC_ROOT_THREAD_BLOCK    Reference from thread block
 220  *
 221  *                          id         object ID
 222  *                          u4         thread serial number
 223  *
 224  *               HPROF_GC_ROOT_MONITOR_USED    Busy monitor
 225  *
 226  *                          id         object ID
 227  *
 228  *               HPROF_GC_CLASS_DUMP           dump of a class object
 229  *
 230  *                          id         class object ID
 231  *                          u4         stack trace serial number
 232  *                          id         super class object ID
 233  *                          id         class loader object ID
 234  *                          id         signers object ID
 235  *                          id         protection domain object ID
 236  *                          id         reserved
 237  *                          id         reserved
 238  *
 239  *                          u4         instance size (in bytes)
 240  *
 241  *                          u2         size of constant pool
 242  *                          [u2,       constant pool index,
 243  *                           ty,       type
 244  *                                     2:  object
 245  *                                     4:  boolean
 246  *                                     5:  char
 247  *                                     6:  float
 248  *                                     7:  double
 249  *                                     8:  byte
 250  *                                     9:  short
 251  *                                     10: int
 252  *                                     11: long
 253  *                           vl]*      and value
 254  *
 255  *                          u2         number of static fields
 256  *                          [id,       static field name,
 257  *                           ty,       type,
 258  *                           vl]*      and value
 259  *
 260  *                          u2         number of inst. fields (not inc. super)
 261  *                          [id,       instance field name,
 262  *                           ty]*      type
 263  *
 264  *               HPROF_GC_INSTANCE_DUMP        dump of a normal object
 265  *
 266  *                          id         object ID
 267  *                          u4         stack trace serial number
 268  *                          id         class object ID
 269  *                          u4         number of bytes that follow
 270  *                          [vl]*      instance field values (class, followed
 271  *                                     by super, super's super ...)
 272  *
 273  *               HPROF_GC_OBJ_ARRAY_DUMP       dump of an object array
 274  *
 275  *                          id         array object ID
 276  *                          u4         stack trace serial number
 277  *                          u4         number of elements
 278  *                          id         array class ID
 279  *                          [id]*      elements
 280  *
 281  *               HPROF_GC_PRIM_ARRAY_DUMP      dump of a primitive array
 282  *
 283  *                          id         array object ID
 284  *                          u4         stack trace serial number
 285  *                          u4         number of elements
 286  *                          u1         element type
 287  *                                     4:  boolean array
 288  *                                     5:  char array
 289  *                                     6:  float array
 290  *                                     7:  double array
 291  *                                     8:  byte array
 292  *                                     9:  short array
 293  *                                     10: int array
 294  *                                     11: long array
 295  *                          [u1]*      elements
 296  *
 297  * HPROF_CPU_SAMPLES        a set of sample traces of running threads
 298  *
 299  *                u4        total number of samples
 300  *                u4        # of traces
 301  *               [u4        # of samples
 302  *                u4]*      stack trace serial number
 303  *
 304  * HPROF_CONTROL_SETTINGS   the settings of on/off switches
 305  *
 306  *                u4        0x00000001: alloc traces on/off
 307  *                          0x00000002: cpu sampling on/off
 308  *                u2        stack trace depth
 309  *
 310  *
 311  * When the header is "JAVA PROFILE 1.0.2" a heap dump can optionally
 312  * be generated as a sequence of heap dump segments. This sequence is
 313  * terminated by an end record. The additional tags allowed by format
 314  * "JAVA PROFILE 1.0.2" are:
 315  *
 316  * HPROF_HEAP_DUMP_SEGMENT  denote a heap dump segment
 317  *
 318  *               [heap dump sub-records]*
 319  *               The same sub-record types allowed by HPROF_HEAP_DUMP
 320  *
 321  * HPROF_HEAP_DUMP_END      denotes the end of a heap dump
 322  *
 323  */
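
/*
 * For example, with 8-byte identifiers a HPROF_UTF8 record for the 4-character
 * string "main" consists of the u1 tag 0x01, a u4 time delta, a u4 length of 12
 * (8 bytes of name ID plus 4 UTF8 bytes), followed by the name ID and the bytes
 * 'm', 'a', 'i', 'n'.
 */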
 324 
 325 
 326 // HPROF tags
 327 
 328 typedef enum {
 329   // top-level records
 330   HPROF_UTF8                    = 0x01,
 331   HPROF_LOAD_CLASS              = 0x02,
 332   HPROF_UNLOAD_CLASS            = 0x03,
 333   HPROF_FRAME                   = 0x04,
 334   HPROF_TRACE                   = 0x05,
 335   HPROF_ALLOC_SITES             = 0x06,
 336   HPROF_HEAP_SUMMARY            = 0x07,
 337   HPROF_START_THREAD            = 0x0A,
 338   HPROF_END_THREAD              = 0x0B,
 339   HPROF_HEAP_DUMP               = 0x0C,
 340   HPROF_CPU_SAMPLES             = 0x0D,
 341   HPROF_CONTROL_SETTINGS        = 0x0E,
 342 
 343   // 1.0.2 record types
 344   HPROF_HEAP_DUMP_SEGMENT       = 0x1C,
 345   HPROF_HEAP_DUMP_END           = 0x2C,
 346 
 347   // field types
 348   HPROF_ARRAY_OBJECT            = 0x01,
 349   HPROF_NORMAL_OBJECT           = 0x02,
 350   HPROF_BOOLEAN                 = 0x04,
 351   HPROF_CHAR                    = 0x05,
 352   HPROF_FLOAT                   = 0x06,
 353   HPROF_DOUBLE                  = 0x07,
 354   HPROF_BYTE                    = 0x08,
 355   HPROF_SHORT                   = 0x09,
 356   HPROF_INT                     = 0x0A,
 357   HPROF_LONG                    = 0x0B,
 358 
 359   // data-dump sub-records
 360   HPROF_GC_ROOT_UNKNOWN         = 0xFF,
 361   HPROF_GC_ROOT_JNI_GLOBAL      = 0x01,
 362   HPROF_GC_ROOT_JNI_LOCAL       = 0x02,
 363   HPROF_GC_ROOT_JAVA_FRAME      = 0x03,
 364   HPROF_GC_ROOT_NATIVE_STACK    = 0x04,
 365   HPROF_GC_ROOT_STICKY_CLASS    = 0x05,
 366   HPROF_GC_ROOT_THREAD_BLOCK    = 0x06,
 367   HPROF_GC_ROOT_MONITOR_USED    = 0x07,
 368   HPROF_GC_ROOT_THREAD_OBJ      = 0x08,
 369   HPROF_GC_CLASS_DUMP           = 0x20,
 370   HPROF_GC_INSTANCE_DUMP        = 0x21,
 371   HPROF_GC_OBJ_ARRAY_DUMP       = 0x22,
 372   HPROF_GC_PRIM_ARRAY_DUMP      = 0x23
 373 } hprofTag;
 374 
 375 // Default stack trace ID (used for dummy HPROF_TRACE record)
 376 enum {
 377   STACK_TRACE_ID = 1,
 378   INITIAL_CLASS_COUNT = 200
 379 };
 380 
 381 class GzipBackend;
 382 class WriteWorkList;
 383 
// Interface for a compression implementation.
 385 class AbstractCompressor : public CHeapObj<mtInternal> {
 386 public:
 387   virtual ~AbstractCompressor() { }
 388 
  // Initializes the compressor. Returns a static error message in case of an error,
  // NULL otherwise. On success it sets the needed out and tmp sizes for the given block size.
 391   virtual char const* init(size_t block_size, size_t* needed_out_size, size_t* needed_tmp_size) = 0;
 392 
 393   // Does the actual compression. Returns NULL on success and a static error message otherwise.
 394   // Sets the 'compressed_size'.
 395   virtual char const* compress(char* in, size_t in_size, char* out, size_t out_size, char* tmp, size_t tmp_size,
 396                                size_t* compressed_size) = 0;
 397 };
 398 
 399 // Interface for a writer implementation.
 400 class AbstractWriter : public CHeapObj<mtInternal> {
 401 public:
 402   virtual ~AbstractWriter() { }
 403 
 404   // Opens the writer. Returns NULL on success and a static error message otherwise.
 405   virtual char const* open_writer() = 0;
 406 
 407   // Does the write. Returns NULL on success and a static error message otherwise.
 408   virtual char const* write_buf(char* buf, ssize_t size) = 0;
 409 };
 410 
 411 
 412 typedef char const* (*GzipInitFunc)(size_t, size_t*, size_t*, int);
 413 typedef size_t(*GzipFunc)(char*, size_t, char*, size_t, char*, size_t, int, char*, char const**);
 414 
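// An AbstractCompressor backed by the gzip support in the zip library. The
// ZIP_GZip_InitParams and ZIP_GZip_Fully entry points are resolved at runtime via
// load_gzip_func() below, so there is no link-time dependency on that library.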
 415 class GZipComressor : public AbstractCompressor {
 416 private:
 417   int _level;
 418   size_t _block_size;
 419   bool _is_first;
 420 
 421   GzipInitFunc gzip_init_func;
 422   GzipFunc gzip_func;
 423 
 424   void* load_gzip_func(char const* name);
 425 
 426 public:
 427   GZipComressor(int level) : _level(level), _block_size(0), _is_first(false) {
 428   }
 429 
 430   virtual char const* init(size_t block_size, size_t* needed_out_size, size_t* needed_tmp_size);
 431 
 432   virtual char const* compress(char* in, size_t in_size, char* out, size_t out_size, char* tmp, size_t tmp_size,
 433                                size_t* compressed_size);
 434 };
 435 
 436 void* GZipComressor::load_gzip_func(char const* name) {
 437   char path[JVM_MAXPATHLEN];
 438   char ebuf[1024];
 439   void* handle;
 440 
 441   if (os::dll_locate_lib(path, sizeof(path), Arguments::get_dll_dir(), "zip")) {
 442     handle = os::dll_load(path, ebuf, sizeof ebuf);
 443 
 444     if (handle != NULL) {
 445       return os::dll_lookup(handle, name);
 446     }
 447   }
 448 
 449   return NULL;
 450 }
 451 
 452 char const* GZipComressor::init(size_t block_size, size_t* needed_out_size, size_t* needed_tmp_size) {
 453   _block_size = block_size;
 454   _is_first = true;
 455 
 456   gzip_func = (GzipFunc) load_gzip_func("ZIP_GZip_Fully");
 457 
 458   if (gzip_func == NULL) {
 459     return  "Cannot get ZIP_GZip_Fully function";
 460   } else {
 461     gzip_init_func = (GzipInitFunc) load_gzip_func("ZIP_GZip_InitParams");
 462 
 463     if (gzip_init_func == NULL) {
 464       return "Cannot get ZIP_GZip_InitParams function";
 465     } else {
 466       return gzip_init_func(block_size, needed_out_size, needed_tmp_size, _level);
 467     }
 468   }
 469 }
 470 
 471 char const* GZipComressor::compress(char* in, size_t in_size, char* out, size_t out_size, char* tmp,
 472                                     size_t tmp_size, size_t* compressed_size) {
 473   char const* msg = NULL;
 474 
  if (_is_first) {
    // For the first block, pass the block size to the gzip function as an extra
    // "HPROF BLOCKSIZE=<n>" annotation.
    char buf[128];
    jio_snprintf(buf, sizeof(buf), "HPROF BLOCKSIZE=" SIZE_FORMAT, _block_size);
 478     *compressed_size = gzip_func(in, in_size, out, out_size, tmp, tmp_size, _level, buf, &msg);
 479     _is_first = false;
 480   } else {
 481     *compressed_size = gzip_func(in, in_size, out, out_size, tmp, tmp_size, _level, NULL, &msg);
 482   }
 483 
 484   return msg;
 485 }
 486 
 487 
 488 // A writer for a file.
 489 class FileWriter : public AbstractWriter {
 490 private:
 491   char const* _path;
 492   int _fd;
 493 
 494 public:
 495   FileWriter(char const* path) : _path(path), _fd(-1) { }
 496 
 497   ~FileWriter();
 498 
 499   // Opens the writer. Returns NULL on success and a static error message otherwise.
 500   virtual char const* open_writer();
 501 
 502   // Does the write. Returns NULL on success and a static error message otherwise.
 503   virtual char const* write_buf(char* buf, ssize_t size);
 504 };
 505 
 506 char const* FileWriter::open_writer() {
 507   assert(_fd < 0, "Must not already be open");
 508 
 509   _fd = os::create_binary_file(_path, false);    // don't replace existing file
 510 
 511   if (_fd < 0) {
 512     return os::strerror(errno);
 513   }
 514 
 515   return NULL;
 516 }
 517 
 518 FileWriter::~FileWriter() {
 519   if (_fd >= 0) {
 520     os::close(_fd);
 521     _fd = -1;
 522   }
 523 }
 524 
 525 char const* FileWriter::write_buf(char* buf, ssize_t size) {
 526   assert(_fd >= 0, "Must be open");
 527   assert(size > 0, "Must write at least one byte");
 528 
 529   ssize_t n = (ssize_t) os::write(_fd, buf, (uint) size);
 530 
 531   if (n <= 0) {
 532     return os::strerror(errno);
 533   }
 534 
 535   return NULL;
 536 }
 537 
// The data needed to write a single buffer (and optionally compress it).
 539 struct WriteWork {
 540   // The id of the work.
 541   int64_t id;
 542 
 543   // The input buffer where the raw data is
 544   char* in;
 545   size_t in_used;
 546   size_t in_max;
 547 
 548   // The output buffer where the compressed data is. Is NULL when compression is disabled.
 549   char* out;
 550   size_t out_used;
 551   size_t out_max;
 552 
 553   // The temporary space needed for compression. Is NULL when compression is disabled.
 554   char* tmp;
 555   size_t tmp_max;
 556 
 557   // Used to link works into lists.
 558   WriteWork* _next;
 559   WriteWork* _prev;
 560 };
 561 
// A doubly-linked list of WriteWork items; _head is used as a sentinel node.
 563 class WorkList {
 564 private:
 565   WriteWork _head;
 566 
 567   void insert(WriteWork* before, WriteWork* work);
 568   WriteWork* remove(WriteWork* work);
 569 
 570 public:
 571   WorkList();
 572 
 573   // Return true if the list is empty.
 574   bool is_empty() { return _head._next == &_head; }
 575 
 576   // Adds to the beginning of the list.
 577   void add_first(WriteWork* work) { insert(&_head, work); }
 578 
 579   // Adds to the end of the list.
 580   void add_last(WriteWork* work) { insert(_head._prev, work); }
 581 
  // Adds the work so that the list stays ordered by id.
 583   void add_by_id(WriteWork* work);
 584 
 585   // Returns the first element.
 586   WriteWork* first() { return is_empty() ? NULL : _head._next; }
 587 
 588   // Returns the last element.
 589   WriteWork* last() { return is_empty() ? NULL : _head._prev; }
 590 
  // Removes the first element. Returns NULL if the list is empty.
  WriteWork* remove_first() { return remove(first()); }

  // Removes the last element. Returns NULL if the list is empty.
  WriteWork* remove_last() { return remove(last()); }
 596 };
 597 
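// The sentinel head links to itself when the list is empty, so insert() and remove()
// never need to special-case the ends of the list.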
 598 WorkList::WorkList() {
 599   _head._next = &_head;
 600   _head._prev = &_head;
 601 }
 602 
 603 void WorkList::insert(WriteWork* before, WriteWork* work) {
 604   work->_prev = before;
 605   work->_next = before->_next;
 606   before->_next = work;
 607   work->_next->_prev = work;
 608 }
 609 
 610 WriteWork* WorkList::remove(WriteWork* work) {
 611   if (work != NULL) {
 612     assert(work->_next != work, "Invalid next");
 613     assert(work->_prev != work, "Invalid prev");
    work->_prev->_next = work->_next;
 615     work->_next->_prev = work->_prev;
 616     work->_next = NULL;
 617     work->_prev = NULL;
 618   }
 619 
 620   return work;
 621 }
 622 
 623 void WorkList::add_by_id(WriteWork* work) {
 624   if (is_empty()) {
 625     add_first(work);
 626   } else {
 627     WriteWork* last_curr = &_head;
 628     WriteWork* curr = _head._next;
 629 
 630     while (curr->id < work->id) {
 631       last_curr = curr;
 632       curr = curr->_next;
 633 
 634       if (curr == &_head) {
 635         add_last(work);
 636         return;
 637       }
 638     }
 639 
 640     insert(last_curr, work);
 641   }
 642 }
 643 
 644 // The backend used to write data (and optionally compress it).
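//
// A producer thread obtains buffers via get_new_buffer(); buffers that are full enough
// are queued on _to_compress. Worker threads running thread_loop() pick them up,
// compress them if a compressor is configured, and hand them to finish_work(), which
// writes the finished buffers in id order and recycles them onto the _unused list.
// Without registered worker threads the producer compresses and writes the data itself.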
 645 class CompressionBackend : StackObj {
 646   bool _active;
 647   char const * _err;
 648 
 649   int _nr_of_threads;
 650   int _works_created;
 651   bool _work_creation_failed;
 652 
 653   int64_t _id_to_write;
 654   int64_t _next_id;
 655 
 656   size_t _in_size;
 657   size_t _max_waste;
 658   size_t _out_size;
 659   size_t _tmp_size;
 660 
 661   size_t _written;
 662 
 663   AbstractWriter* _writer;
 664   AbstractCompressor* _compressor;
 665 
 666   Monitor* _lock;
 667 
 668   WriteWork* _current;
 669   WorkList _to_compress;
 670   WorkList _unused;
 671   WorkList _finished;
 672 
 673   void set_error(char const* new_error);
 674 
 675   WriteWork* allocate_work(size_t in_size, size_t out_size, size_t tmp_size);
 676   void free_work(WriteWork* work);
 677   void free_work_list(WorkList* list);
 678 
 679   WriteWork* get_work();
 680   void do_compress(WriteWork* work);
 681   void finish_work(WriteWork* work);
 682 
 683 public:
 684   // compressor can be NULL if no compression is used.
 685   // Takes ownership of the writer and compressor.
 686   // block_size is the buffer size of a WriteWork.
  // max_waste is the maximum number of bytes to leave
 688   // empty in the buffer when it is written.
 689   CompressionBackend(AbstractWriter* writer, AbstractCompressor* compressor,
 690                      size_t block_size, size_t max_waste);
 691 
 692   ~CompressionBackend();
 693 
 694   size_t get_written() const { return _written; }
 695 
 696   char const* error() const { return _err; }
 697 
 698   // Commits the old buffer and sets up a new one.
 699   void get_new_buffer(char** buffer, size_t* used, size_t* max);
 700 
 701   // The entry point for a worker thread. If single_run is true, we only handle one work entry.
 702   void thread_loop(bool single_run);
 703 
 704   // Shuts down the backend, releasing all threads.
 705   void deactivate();
 706 };
 707 
 708 CompressionBackend::CompressionBackend(AbstractWriter* writer, AbstractCompressor* compressor,
 709                                        size_t block_size, size_t max_waste) :
 710   _active(false),
 711   _err(NULL),
 712   _nr_of_threads(0),
 713   _works_created(0),
 714   _work_creation_failed(false),
 715   _id_to_write(0),
 716   _next_id(0),
 717   _in_size(block_size),
 718   _max_waste(max_waste),
 719   _out_size(0),
 720   _tmp_size(0),
 721   _written(0),
 722   _writer(writer),
 723   _compressor(compressor),
 724   _lock(new (std::nothrow) PaddedMonitor(Mutex::leaf, "HProf Compression Backend",
 725         true, Mutex::_safepoint_check_never)) {
 726   if (_writer == NULL) {
 727     set_error("Could not allocate writer");
 728   } else if (_lock == NULL) {
 729     set_error("Could not allocate lock");
 730   } else {
 731     set_error(_writer->open_writer());
 732   }
 733 
 734   if (_compressor != NULL) {
 735     set_error(_compressor->init(_in_size, &_out_size, &_tmp_size));
 736   }
 737 
 738   _current = allocate_work(_in_size, _out_size, _tmp_size);
 739 
 740   if (_current == NULL) {
 741     set_error("Could not allocate memory for buffer");
 742   }
 743 
 744   _active = (_err == NULL);
 745 }
 746 
 747 CompressionBackend::~CompressionBackend() {
 748   assert(!_active, "Must not be active by now");
 749   assert(_nr_of_threads == 0, "Must have no active threads");
 750   assert(_to_compress.is_empty() && _finished.is_empty(), "Still work to do");
 751 
 752   free_work_list(&_unused);
 753   free_work(_current);
 754   assert(_works_created == 0, "All work must have been freed");
 755 
 756   delete _compressor;
 757   delete _writer;
 758   delete _lock;
 759 }
 760 
 761 void CompressionBackend::deactivate() {
 762   assert(_active, "Must be active");
 763 
 764   MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
 765 
 766   // Make sure we write a partially filled buffer.
 767   if ((_current != NULL) && (_current->in_used > 0)) {
 768     _current->id = _next_id++;
 769     _to_compress.add_last(_current);
 770     _current = NULL;
 771     ml.notify_all();
 772   }
 773 
 774   // Wait for the threads to drain the compression work list.
 775   while (!_to_compress.is_empty()) {
    // If we have no worker threads, do the compression in the current thread.
 777     if (_nr_of_threads == 0) {
 778       MutexUnlocker mu(_lock, Mutex::_no_safepoint_check_flag);
 779       thread_loop(true);
 780     } else {
 781       ml.wait();
 782     }
 783   }
 784 
 785   _active = false;
 786   ml.notify_all();
 787 }
 788 
 789 void CompressionBackend::thread_loop(bool single_run) {
 790   // Register if this is a worker thread.
 791   if (!single_run) {
 792     MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
 793     _nr_of_threads++;
 794   }
 795 
 796   while (true) {
 797     WriteWork* work = get_work();
 798 
 799     if (work == NULL) {
 800       assert(!single_run, "Should never happen for single thread");
 801       MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
 802       _nr_of_threads--;
 803       assert(_nr_of_threads >= 0, "Too many threads finished");
 804       ml.notify_all();
 805 
 806       return;
 807     } else {
 808       do_compress(work);
 809       finish_work(work);
 810     }
 811 
 812     if (single_run) {
 813       return;
 814     }
 815   }
 816 }
 817 
 818 void CompressionBackend::set_error(char const* new_error) {
 819   if ((new_error != NULL) && (_err == NULL)) {
 820     _err = new_error;
 821   }
 822 }
 823 
 824 WriteWork* CompressionBackend::allocate_work(size_t in_size, size_t out_size, size_t tmp_size) {
 825   WriteWork* result = (WriteWork*) os::malloc(sizeof(WriteWork), mtInternal);
 826 
 827   if (result == NULL) {
 828     _work_creation_failed = true;
 829     return NULL;
 830   }
 831 
 832   _works_created++;
 833   result->in = (char*) os::malloc(in_size, mtInternal);
 834   result->in_max = in_size;
 835   result->in_used = 0;
 836   result->out = NULL;
 837   result->tmp = NULL;
 838 
 839   if (result->in == NULL) {
 840     goto fail;
 841   }
 842 
 843   if (out_size > 0) {
 844     result->out = (char*) os::malloc(out_size, mtInternal);
 845     result->out_used = 0;
 846     result->out_max = out_size;
 847 
 848     if (result->out == NULL) {
 849       goto fail;
 850     }
 851   }
 852 
 853   if (tmp_size > 0) {
 854     result->tmp = (char*) os::malloc(tmp_size, mtInternal);
 855     result->tmp_max = tmp_size;
 856 
 857     if (result->tmp == NULL) {
 858       goto fail;
 859     }
 860   }
 861 
 862   return result;
 863 
 864 fail:
 865   free_work(result);
 866   _work_creation_failed = true;
 867   return NULL;
 868 }
 869 
 870 void CompressionBackend::free_work(WriteWork* work) {
 871   if (work != NULL) {
 872     os::free(work->in);
 873     os::free(work->out);
 874     os::free(work->tmp);
 875     os::free(work);
 876     --_works_created;
 877   }
 878 }
 879 
 880 void CompressionBackend::free_work_list(WorkList* list) {
 881   while (!list->is_empty()) {
 882     free_work(list->remove_first());
 883   }
 884 }
 885 
 886 WriteWork* CompressionBackend::get_work() {
 887   MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
 888 
 889   while (_active && _to_compress.is_empty()) {
 890     ml.wait();
 891   }
 892 
 893   return _to_compress.remove_first();
 894 }
 895 
 896 void CompressionBackend::get_new_buffer(char** buffer, size_t* used, size_t* max) {
 897   if (_active) {
 898     MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
 899 
 900     if (*used > 0) {
 901       _current->in_used += *used;
 902 
      // Check whether writing the buffer now would waste no more than _max_waste bytes.
      // If so, queue it for writing. Otherwise return the rest of the buffer as the new buffer.
 905       if (_current->in_max - _current->in_used <= _max_waste) {
 906         _current->id = _next_id++;
 907         _to_compress.add_last(_current);
 908         _current = NULL;
 909         ml.notify_all();
 910       } else {
 911         *buffer = _current->in + _current->in_used;
 912         *used = 0;
 913         *max = _current->in_max - _current->in_used;
 914 
 915         return;
 916       }
 917     }
 918 
 919     while ((_current == NULL) && _unused.is_empty() && _active) {
 920       // Add more work objects if needed.
 921       if (!_work_creation_failed && (_works_created <= _nr_of_threads)) {
 922         WriteWork* work = allocate_work(_in_size, _out_size, _tmp_size);
 923 
 924         if (work != NULL) {
 925           _unused.add_first(work);
 926         }
 927       } else if (!_to_compress.is_empty() && (_nr_of_threads == 0)) {
        // If we have no worker threads, do the compression in the current thread.
 929         MutexUnlocker mu(_lock, Mutex::_no_safepoint_check_flag);
 930         thread_loop(true);
 931       } else {
 932         ml.wait();
 933       }
 934     }
 935 
 936     if (_current == NULL) {
 937       _current = _unused.remove_first();
 938     }
 939 
 940     if (_current != NULL) {
 941       _current->in_used = 0;
 942       _current->out_used = 0;
 943       *buffer = _current->in;
 944       *used = 0;
 945       *max = _current->in_max;
 946 
 947       return;
 948     }
 949   }
 950 
 951   *buffer = NULL;
 952   *used = 0;
 953   *max = 0;
 954 
 955   return;
 956 }
 957 
 958 void CompressionBackend::do_compress(WriteWork* work) {
 959   if (_compressor != NULL) {
 960     char const* msg = _compressor->compress(work->in, work->in_used, work->out, work->out_max,
 961                                             work->tmp, _tmp_size, &work->out_used);
 962 
 963     if (msg != NULL) {
 964       MutexLocker ml(_lock, Mutex::_no_safepoint_check_flag);
 965       set_error(msg);
 966     }
 967   }
 968 }
 969 
 970 void CompressionBackend::finish_work(WriteWork* work) {
 971   MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
 972 
 973   _finished.add_by_id(work);
 974 
 975   // Write all finished works as far as we can.
 976   while (!_finished.is_empty() && (_finished.first()->id == _id_to_write)) {
 977     WriteWork* to_write = _finished.remove_first();
    size_t left = _compressor == NULL ? to_write->in_used : to_write->out_used;
 979     char* p = _compressor == NULL ? to_write->in : to_write->out;
 980     char const* msg = NULL;
 981 
 982     if (_err == NULL) {
 983       _written += left;
 984       MutexUnlocker mu(_lock, Mutex::_no_safepoint_check_flag);
 985       msg = _writer->write_buf(p, (ssize_t) left);
 986     }
 987 
 988     set_error(msg);
 989     _unused.add_first(to_write);
 990     _id_to_write++;
 991   }
 992 
 993   ml.notify_all();
 994 }
 995 
 996 
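// Buffered writer for the HPROF output. Data is written into a buffer obtained from the
// CompressionBackend; flush() commits the filled part and fetches a fresh buffer. The
// writer also tracks the current heap dump segment, so sub-records are packed into
// HPROF_HEAP_DUMP_SEGMENT records that normally fit into a single buffer (huge
// sub-records get a segment of their own).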
 997 class DumpWriter : public StackObj {
 998  private:
 999   enum {
1000     io_buffer_max_size = 1*M,
1001     io_buffer_max_waste = 10*K,
    dump_segment_header_size = 9 // 1-byte tag + 4-byte timestamp + 4-byte length
1003   };
1004 
1005   char* _buffer;    // internal buffer
1006   size_t _size;
1007   size_t _pos;
1008 
1009   bool _in_dump_segment; // Are we currently in a dump segment?
1010   bool _is_huge_sub_record; // Are we writing a sub-record larger than the buffer size?
1011   DEBUG_ONLY(size_t _sub_record_left;) // The bytes not written for the current sub-record.
  DEBUG_ONLY(bool _sub_record_ended;) // True if end_sub_record() has been called for the current sub-record.
1013 
1014   CompressionBackend _backend; // Does the actual writing.
1015 
1016   void flush();
1017 
1018   char* buffer() const                          { return _buffer; }
1019   size_t buffer_size() const                    { return _size; }
1020   size_t position() const                       { return _pos; }
1021   void set_position(size_t pos)                 { _pos = pos; }
1022 
1023   // Can be called if we have enough room in the buffer.
1024   void write_fast(void* s, size_t len);
1025 
1026   // Returns true if we have enough room in the buffer for 'len' bytes.
1027   bool can_write_fast(size_t len);
1028 
1029  public:
1030   // Takes ownership of the writer and compressor.
1031   DumpWriter(AbstractWriter* writer, AbstractCompressor* compressor);
1032 
1033   ~DumpWriter();
1034 
1035   // total number of bytes written to the disk
1036   julong bytes_written() const          { return (julong) _backend.get_written(); }
1037 
1038   char const* error() const             { return _backend.error(); }
1039 
1040   // writer functions
1041   void write_raw(void* s, size_t len);
1042   void write_u1(u1 x);
1043   void write_u2(u2 x);
1044   void write_u4(u4 x);
1045   void write_u8(u8 x);
1046   void write_objectID(oop o);
1047   void write_symbolID(Symbol* o);
1048   void write_classID(Klass* k);
1049   void write_id(u4 x);
1050 
1051   // Start a new sub-record. Starts a new heap dump segment if needed.
1052   void start_sub_record(u1 tag, u4 len);
1053   // Ends the current sub-record.
1054   void end_sub_record();
1055   // Finishes the current dump segment if not already finished.
1056   void finish_dump_segment();
1057 
1058   // Called by threads used for parallel writing.
1059   void writer_loop()                    { _backend.thread_loop(false); }
1060   // Called when finished to release the threads.
1061   void deactivate()                     { _backend.deactivate(); }
1062 };
1063 
1064 DumpWriter::DumpWriter(AbstractWriter* writer, AbstractCompressor* compressor) :
1065   _buffer(NULL),
1066   _size(0),
1067   _pos(0),
1068   _in_dump_segment(false),
1069   _backend(writer, compressor, io_buffer_max_size, io_buffer_max_waste) {
1070   flush();
1071 }
1072 
1073 DumpWriter::~DumpWriter() {
1074   flush();
1075 }
1076 
1077 void DumpWriter::write_fast(void* s, size_t len) {
1078   assert(!_in_dump_segment || (_sub_record_left >= len), "sub-record too large");
1079   assert(buffer_size() - position() >= len, "Must fit");
1080   debug_only(_sub_record_left -= len);
1081 
1082   memcpy(buffer() + position(), s, len);
1083   set_position(position() + len);
1084 }
1085 
1086 bool DumpWriter::can_write_fast(size_t len) {
1087   return buffer_size() - position() >= len;
1088 }
1089 
1090 // write raw bytes
1091 void DumpWriter::write_raw(void* s, size_t len) {
1092   assert(!_in_dump_segment || (_sub_record_left >= len), "sub-record too large");
1093   debug_only(_sub_record_left -= len);
1094 
1095   // flush buffer to make room.
1096   while (len > buffer_size() - position()) {
1097     assert(!_in_dump_segment || _is_huge_sub_record, "Cannot overflow in non-huge sub-record.");
1098 
1099     size_t to_write = buffer_size() - position();
1100     memcpy(buffer() + position(), s, to_write);
1101     s = (void*) ((char*) s + to_write);
1102     len -= to_write;
1103     set_position(position() + to_write);
1104     flush();
1105   }
1106 
1107   memcpy(buffer() + position(), s, len);
1108   set_position(position() + len);
1109 }
1110 
1111 // flush any buffered bytes to the file
1112 void DumpWriter::flush() {
1113   _backend.get_new_buffer(&_buffer, &_pos, &_size);
1114 }
1115 
1116 // Makes sure we inline the fast write into the write_u* functions. This is a big speedup.
1117 #define WRITE_KNOWN_TYPE(p, len) do { if (can_write_fast((len))) write_fast((p), (len)); \
1118                                       else write_raw((p), (len)); } while (0)
1119 
1120 void DumpWriter::write_u1(u1 x) {
1121   WRITE_KNOWN_TYPE((void*) &x, 1);
1122 }
1123 
1124 void DumpWriter::write_u2(u2 x) {
1125   u2 v;
1126   Bytes::put_Java_u2((address)&v, x);
1127   WRITE_KNOWN_TYPE((void*)&v, 2);
1128 }
1129 
1130 void DumpWriter::write_u4(u4 x) {
1131   u4 v;
1132   Bytes::put_Java_u4((address)&v, x);
1133   WRITE_KNOWN_TYPE((void*)&v, 4);
1134 }
1135 
1136 void DumpWriter::write_u8(u8 x) {
1137   u8 v;
1138   Bytes::put_Java_u8((address)&v, x);
1139   WRITE_KNOWN_TYPE((void*)&v, 8);
1140 }
1141 
1142 void DumpWriter::write_objectID(oop o) {
1143   address a = cast_from_oop<address>(o);
1144 #ifdef _LP64
1145   write_u8((u8)a);
1146 #else
1147   write_u4((u4)a);
1148 #endif
1149 }
1150 
1151 void DumpWriter::write_symbolID(Symbol* s) {
1152   address a = (address)((uintptr_t)s);
1153 #ifdef _LP64
1154   write_u8((u8)a);
1155 #else
1156   write_u4((u4)a);
1157 #endif
1158 }
1159 
1160 void DumpWriter::write_id(u4 x) {
1161 #ifdef _LP64
1162   write_u8((u8) x);
1163 #else
1164   write_u4(x);
1165 #endif
1166 }
1167 
// We use the java mirror as the class ID
1169 void DumpWriter::write_classID(Klass* k) {
1170   write_objectID(k->java_mirror());
1171 }
1172 
1173 void DumpWriter::finish_dump_segment() {
1174   if (_in_dump_segment) {
1175     assert(_sub_record_left == 0, "Last sub-record not written completely");
1176     assert(_sub_record_ended, "sub-record must have ended");
1177 
1178     // Fix up the dump segment length if we haven't written a huge sub-record last
1179     // (in which case the segment length was already set to the correct value initially).
1180     if (!_is_huge_sub_record) {
1181       assert(position() > dump_segment_header_size, "Dump segment should have some content");
      // Patch the segment length field, which sits after the 1-byte tag and 4-byte timestamp.
      Bytes::put_Java_u4((address) (buffer() + 5), (u4) (position() - dump_segment_header_size));
1183     }
1184 
1185     flush();
1186     _in_dump_segment = false;
1187   }
1188 }
1189 
1190 void DumpWriter::start_sub_record(u1 tag, u4 len) {
1191   if (!_in_dump_segment) {
1192     if (position() > 0) {
1193       flush();
1194     }
1195 
1196     assert(position() == 0, "Must be at the start");
1197 
1198     write_u1(HPROF_HEAP_DUMP_SEGMENT);
1199     write_u4(0); // timestamp
1200     // Will be fixed up later if we add more sub-records.  If this is a huge sub-record,
1201     // this is already the correct length, since we don't add more sub-records.
1202     write_u4(len);
1203     _in_dump_segment = true;
1204     _is_huge_sub_record = len > buffer_size() - dump_segment_header_size;
1205   } else if (_is_huge_sub_record || (len > buffer_size() - position())) {
    // This sub-record will not completely fit in the buffer, or the last sub-record was huge.
    // Finish the current segment and try again.
1208     finish_dump_segment();
1209     start_sub_record(tag, len);
1210 
1211     return;
1212   }
1213 
1214   debug_only(_sub_record_left = len);
1215   debug_only(_sub_record_ended = false);
1216 
1217   write_u1(tag);
1218 }
1219 
1220 void DumpWriter::end_sub_record() {
1221   assert(_in_dump_segment, "must be in dump segment");
1222   assert(_sub_record_left == 0, "sub-record not written completely");
1223   assert(!_sub_record_ended, "Must not have ended yet");
1224   debug_only(_sub_record_ended = true);
1225 }
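
// A sub-record is emitted by bracketing exactly 'len' bytes of write_* calls between
// start_sub_record() and end_sub_record(); see DumperSupport::dump_instance() below for
// a typical example:
//
//   writer->start_sub_record(HPROF_GC_INSTANCE_DUMP, size);
//   ... write exactly 'size' bytes ...
//   writer->end_sub_record();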
1226 
1227 // Support class with a collection of functions used when dumping the heap
1228 
1229 class DumperSupport : AllStatic {
1230  public:
1231 
1232   // write a header of the given type
1233   static void write_header(DumpWriter* writer, hprofTag tag, u4 len);
1234 
1235   // returns hprof tag for the given type signature
1236   static hprofTag sig2tag(Symbol* sig);
1237   // returns hprof tag for the given basic type
1238   static hprofTag type2tag(BasicType type);
1239   // Returns the size of the data to write.
1240   static u4 sig2size(Symbol* sig);
1241 
1242   // returns the size of the instance of the given class
1243   static u4 instance_size(Klass* k);
1244 
1245   // dump a jfloat
1246   static void dump_float(DumpWriter* writer, jfloat f);
1247   // dump a jdouble
1248   static void dump_double(DumpWriter* writer, jdouble d);
1249   // dumps the raw value of the given field
1250   static void dump_field_value(DumpWriter* writer, char type, oop obj, int offset);
1251   // returns the size of the static fields; also counts the static fields
1252   static u4 get_static_fields_size(InstanceKlass* ik, u2& field_count);
1253   // dumps static fields of the given class
1254   static void dump_static_fields(DumpWriter* writer, Klass* k);
1255   // dump the raw values of the instance fields of the given object
1256   static void dump_instance_fields(DumpWriter* writer, oop o);
1257   // get the count of the instance fields for a given class
1258   static u2 get_instance_fields_count(InstanceKlass* ik);
1259   // dumps the definition of the instance fields for a given class
1260   static void dump_instance_field_descriptors(DumpWriter* writer, Klass* k);
1261   // creates HPROF_GC_INSTANCE_DUMP record for the given object
1262   static void dump_instance(DumpWriter* writer, oop o);
1263   // creates HPROF_GC_CLASS_DUMP record for the given class and each of its
1264   // array classes
1265   static void dump_class_and_array_classes(DumpWriter* writer, Klass* k);
1266   // creates HPROF_GC_CLASS_DUMP record for a given primitive array
1267   // class (and each multi-dimensional array class too)
1268   static void dump_basic_type_array_class(DumpWriter* writer, Klass* k);
1269 
1270   // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
1271   static void dump_object_array(DumpWriter* writer, objArrayOop array);
1272   // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
1273   static void dump_prim_array(DumpWriter* writer, typeArrayOop array);
1274   // create HPROF_FRAME record for the given method and bci
1275   static void dump_stack_frame(DumpWriter* writer, int frame_serial_num, int class_serial_num, Method* m, int bci);
1276 
1277   // check if we need to truncate an array
1278   static int calculate_array_max_length(DumpWriter* writer, arrayOop array, short header_size);
1279 
1280   // fixes up the current dump record and writes HPROF_HEAP_DUMP_END record
1281   static void end_of_dump(DumpWriter* writer);
1282 
1283   static oop mask_dormant_archived_object(oop o) {
1284     if (o != NULL && o->klass()->java_mirror() == NULL) {
1285       // Ignore this object since the corresponding java mirror is not loaded.
1286       // Might be a dormant archive object.
1287       return NULL;
1288     } else {
1289       return o;
1290     }
1291   }
1292 };
1293 
1294 // write a header of the given type
1295 void DumperSupport:: write_header(DumpWriter* writer, hprofTag tag, u4 len) {
1296   writer->write_u1((u1)tag);
1297   writer->write_u4(0);                  // current ticks
1298   writer->write_u4(len);
1299 }
1300 
1301 // returns hprof tag for the given type signature
1302 hprofTag DumperSupport::sig2tag(Symbol* sig) {
1303   switch (sig->char_at(0)) {
1304     case JVM_SIGNATURE_CLASS    : return HPROF_NORMAL_OBJECT;
1305     case JVM_SIGNATURE_ARRAY    : return HPROF_NORMAL_OBJECT;
1306     case JVM_SIGNATURE_BYTE     : return HPROF_BYTE;
1307     case JVM_SIGNATURE_CHAR     : return HPROF_CHAR;
1308     case JVM_SIGNATURE_FLOAT    : return HPROF_FLOAT;
1309     case JVM_SIGNATURE_DOUBLE   : return HPROF_DOUBLE;
1310     case JVM_SIGNATURE_INT      : return HPROF_INT;
1311     case JVM_SIGNATURE_LONG     : return HPROF_LONG;
1312     case JVM_SIGNATURE_SHORT    : return HPROF_SHORT;
1313     case JVM_SIGNATURE_BOOLEAN  : return HPROF_BOOLEAN;
1314     default : ShouldNotReachHere(); /* to shut up compiler */ return HPROF_BYTE;
1315   }
1316 }
1317 
1318 hprofTag DumperSupport::type2tag(BasicType type) {
1319   switch (type) {
1320     case T_BYTE     : return HPROF_BYTE;
1321     case T_CHAR     : return HPROF_CHAR;
1322     case T_FLOAT    : return HPROF_FLOAT;
1323     case T_DOUBLE   : return HPROF_DOUBLE;
1324     case T_INT      : return HPROF_INT;
1325     case T_LONG     : return HPROF_LONG;
1326     case T_SHORT    : return HPROF_SHORT;
1327     case T_BOOLEAN  : return HPROF_BOOLEAN;
1328     default : ShouldNotReachHere(); /* to shut up compiler */ return HPROF_BYTE;
1329   }
1330 }
1331 
1332 u4 DumperSupport::sig2size(Symbol* sig) {
1333   switch (sig->char_at(0)) {
1334     case JVM_SIGNATURE_CLASS:
1335     case JVM_SIGNATURE_ARRAY: return sizeof(address);
1336     case JVM_SIGNATURE_BOOLEAN:
1337     case JVM_SIGNATURE_BYTE: return 1;
1338     case JVM_SIGNATURE_SHORT:
1339     case JVM_SIGNATURE_CHAR: return 2;
1340     case JVM_SIGNATURE_INT:
1341     case JVM_SIGNATURE_FLOAT: return 4;
1342     case JVM_SIGNATURE_LONG:
1343     case JVM_SIGNATURE_DOUBLE: return 8;
1344     default: ShouldNotReachHere(); /* to shut up compiler */ return 0;
1345   }
1346 }
1347 
1348 // dump a jfloat
1349 void DumperSupport::dump_float(DumpWriter* writer, jfloat f) {
1350   if (g_isnan(f)) {
1351     writer->write_u4(0x7fc00000);    // collapsing NaNs
1352   } else {
1353     union {
1354       int i;
1355       float f;
1356     } u;
1357     u.f = (float)f;
1358     writer->write_u4((u4)u.i);
1359   }
1360 }
1361 
1362 // dump a jdouble
1363 void DumperSupport::dump_double(DumpWriter* writer, jdouble d) {
1364   union {
1365     jlong l;
1366     double d;
1367   } u;
1368   if (g_isnan(d)) {                 // collapsing NaNs
1369     u.l = (jlong)(0x7ff80000);
1370     u.l = (u.l << 32);
1371   } else {
1372     u.d = (double)d;
1373   }
1374   writer->write_u8((u8)u.l);
1375 }
1376 
1377 // dumps the raw value of the given field
1378 void DumperSupport::dump_field_value(DumpWriter* writer, char type, oop obj, int offset) {
1379   switch (type) {
1380     case JVM_SIGNATURE_CLASS :
1381     case JVM_SIGNATURE_ARRAY : {
1382       oop o = obj->obj_field_access<ON_UNKNOWN_OOP_REF | AS_NO_KEEPALIVE>(offset);
1383       if (o != NULL && log_is_enabled(Debug, cds, heap) && mask_dormant_archived_object(o) == NULL) {
1384         ResourceMark rm;
1385         log_debug(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s) referenced by " INTPTR_FORMAT " (%s)",
1386                              p2i(o), o->klass()->external_name(),
1387                              p2i(obj), obj->klass()->external_name());
1388       }
1389       o = mask_dormant_archived_object(o);
1390       assert(oopDesc::is_oop_or_null(o), "Expected an oop or NULL at " PTR_FORMAT, p2i(o));
1391       writer->write_objectID(o);
1392       break;
1393     }
1394     case JVM_SIGNATURE_BYTE : {
1395       jbyte b = obj->byte_field(offset);
1396       writer->write_u1((u1)b);
1397       break;
1398     }
1399     case JVM_SIGNATURE_CHAR : {
1400       jchar c = obj->char_field(offset);
1401       writer->write_u2((u2)c);
1402       break;
1403     }
1404     case JVM_SIGNATURE_SHORT : {
1405       jshort s = obj->short_field(offset);
1406       writer->write_u2((u2)s);
1407       break;
1408     }
1409     case JVM_SIGNATURE_FLOAT : {
1410       jfloat f = obj->float_field(offset);
1411       dump_float(writer, f);
1412       break;
1413     }
1414     case JVM_SIGNATURE_DOUBLE : {
1415       jdouble d = obj->double_field(offset);
1416       dump_double(writer, d);
1417       break;
1418     }
1419     case JVM_SIGNATURE_INT : {
1420       jint i = obj->int_field(offset);
1421       writer->write_u4((u4)i);
1422       break;
1423     }
1424     case JVM_SIGNATURE_LONG : {
1425       jlong l = obj->long_field(offset);
1426       writer->write_u8((u8)l);
1427       break;
1428     }
1429     case JVM_SIGNATURE_BOOLEAN : {
1430       jboolean b = obj->bool_field(offset);
1431       writer->write_u1((u1)b);
1432       break;
1433     }
1434     default : {
1435       ShouldNotReachHere();
1436       break;
1437     }
1438   }
1439 }
1440 
1441 // returns the size of the instance of the given class
1442 u4 DumperSupport::instance_size(Klass* k) {
1443   HandleMark hm;
1444   InstanceKlass* ik = InstanceKlass::cast(k);
1445   u4 size = 0;
1446 
1447   for (FieldStream fld(ik, false, false); !fld.eos(); fld.next()) {
1448     if (!fld.access_flags().is_static()) {
1449       size += sig2size(fld.signature());
1450     }
1451   }
1452   return size;
1453 }
1454 
1455 u4 DumperSupport::get_static_fields_size(InstanceKlass* ik, u2& field_count) {
1456   HandleMark hm;
1457   field_count = 0;
1458   u4 size = 0;
1459 
1460   for (FieldStream fldc(ik, true, true); !fldc.eos(); fldc.next()) {
1461     if (fldc.access_flags().is_static()) {
1462       field_count++;
1463       size += sig2size(fldc.signature());
1464     }
1465   }
1466 
1467   // Add in resolved_references which is referenced by the cpCache
1468   // The resolved_references is an array per InstanceKlass holding the
1469   // strings and other oops resolved from the constant pool.
1470   oop resolved_references = ik->constants()->resolved_references_or_null();
1471   if (resolved_references != NULL) {
1472     field_count++;
1473     size += sizeof(address);
1474 
1475     // Add in the resolved_references of the used previous versions of the class
1476     // in the case of RedefineClasses
1477     InstanceKlass* prev = ik->previous_versions();
1478     while (prev != NULL && prev->constants()->resolved_references_or_null() != NULL) {
1479       field_count++;
1480       size += sizeof(address);
1481       prev = prev->previous_versions();
1482     }
1483   }
1484 
1485   // Also provide a pointer to the init_lock if present, so there aren't unreferenced int[0]
1486   // arrays.
1487   oop init_lock = ik->init_lock();
1488   if (init_lock != NULL) {
1489     field_count++;
1490     size += sizeof(address);
1491   }
1492 
1493   // We write the value itself plus a name and a one byte type tag per field.
1494   return size + field_count * (sizeof(address) + 1);
1495 }
1496 
1497 // dumps static fields of the given class
1498 void DumperSupport::dump_static_fields(DumpWriter* writer, Klass* k) {
1499   HandleMark hm;
1500   InstanceKlass* ik = InstanceKlass::cast(k);
1501 
1502   // dump the field descriptors and raw values
1503   for (FieldStream fld(ik, true, true); !fld.eos(); fld.next()) {
1504     if (fld.access_flags().is_static()) {
1505       Symbol* sig = fld.signature();
1506 
1507       writer->write_symbolID(fld.name());   // name
1508       writer->write_u1(sig2tag(sig));       // type
1509 
1510       // value
1511       dump_field_value(writer, sig->char_at(0), ik->java_mirror(), fld.offset());
1512     }
1513   }
1514 
1515   // Add resolved_references for each class that has them
1516   oop resolved_references = ik->constants()->resolved_references_or_null();
1517   if (resolved_references != NULL) {
1518     writer->write_symbolID(vmSymbols::resolved_references_name());  // name
1519     writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
1520     writer->write_objectID(resolved_references);
1521 
1522     // Also write any previous versions
1523     InstanceKlass* prev = ik->previous_versions();
1524     while (prev != NULL && prev->constants()->resolved_references_or_null() != NULL) {
1525       writer->write_symbolID(vmSymbols::resolved_references_name());  // name
1526       writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
1527       writer->write_objectID(prev->constants()->resolved_references());
1528       prev = prev->previous_versions();
1529     }
1530   }
1531 
1532   // Add init lock to the end if the class is not yet initialized
1533   oop init_lock = ik->init_lock();
1534   if (init_lock != NULL) {
1535     writer->write_symbolID(vmSymbols::init_lock_name());         // name
1536     writer->write_u1(sig2tag(vmSymbols::int_array_signature())); // type
1537     writer->write_objectID(init_lock);
1538   }
1539 }
1540 
1541 // dump the raw values of the instance fields of the given object
1542 void DumperSupport::dump_instance_fields(DumpWriter* writer, oop o) {
1543   HandleMark hm;
1544   InstanceKlass* ik = InstanceKlass::cast(o->klass());
1545 
1546   for (FieldStream fld(ik, false, false); !fld.eos(); fld.next()) {
1547     if (!fld.access_flags().is_static()) {
1548       Symbol* sig = fld.signature();
1549       dump_field_value(writer, sig->char_at(0), o, fld.offset());
1550     }
1551   }
1552 }
1553 
// returns the count of the instance fields for a given class
1555 u2 DumperSupport::get_instance_fields_count(InstanceKlass* ik) {
1556   HandleMark hm;
1557   u2 field_count = 0;
1558 
1559   for (FieldStream fldc(ik, true, true); !fldc.eos(); fldc.next()) {
1560     if (!fldc.access_flags().is_static()) field_count++;
1561   }
1562 
1563   return field_count;
1564 }
1565 
1566 // dumps the definition of the instance fields for a given class
1567 void DumperSupport::dump_instance_field_descriptors(DumpWriter* writer, Klass* k) {
1568   HandleMark hm;
1569   InstanceKlass* ik = InstanceKlass::cast(k);
1570 
1571   // dump the field descriptors
1572   for (FieldStream fld(ik, true, true); !fld.eos(); fld.next()) {
1573     if (!fld.access_flags().is_static()) {
1574       Symbol* sig = fld.signature();
1575 
1576       writer->write_symbolID(fld.name());   // name
1577       writer->write_u1(sig2tag(sig));       // type
1578     }
1579   }
1580 }
1581 
1582 // creates HPROF_GC_INSTANCE_DUMP record for the given object
1583 void DumperSupport::dump_instance(DumpWriter* writer, oop o) {
1584   InstanceKlass* ik = InstanceKlass::cast(o->klass());
1585   u4 is = instance_size(ik);
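  // sub-record layout: tag (u1), object ID, stack trace serial number (u4),
  // class ID, number of field-value bytes that follow (u4), then the raw
  // instance field values themselves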
1586   u4 size = 1 + sizeof(address) + 4 + sizeof(address) + 4 + is;
1587 
1588   writer->start_sub_record(HPROF_GC_INSTANCE_DUMP, size);
1589   writer->write_objectID(o);
1590   writer->write_u4(STACK_TRACE_ID);
1591 
1592   // class ID
1593   writer->write_classID(ik);
1594 
1595   // number of bytes that follow
1596   writer->write_u4(is);
1597 
1598   // field values
1599   dump_instance_fields(writer, o);
1600 
1601   writer->end_sub_record();
1602 }
1603 
1604 // creates HPROF_GC_CLASS_DUMP record for the given class and each of
1605 // its array classes
1606 void DumperSupport::dump_class_and_array_classes(DumpWriter* writer, Klass* k) {
1607   InstanceKlass* ik = InstanceKlass::cast(k);
1608 
1609   // We can safepoint and do a heap dump at a point where we have a Klass,
1610   // but no java mirror class has been set up for it. So we need to check
1611   // that the class is at least loaded, to avoid a crash from a null mirror.
1612   if (!ik->is_loaded()) {
1613     return;
1614   }
1615 
1616   u2 static_fields_count = 0;
1617   u4 static_size = get_static_fields_size(ik, static_fields_count);
1618   u2 instance_fields_count = get_instance_fields_count(ik);
1619   u4 instance_fields_size = instance_fields_count * (sizeof(address) + 1);
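  // record layout: tag (u1) + class ID + stack trace serial number (u4)
  // + 6 IDs (super class, loader, signers, protection domain, 2 reserved)
  // + instance size (u4) + constant pool size (u2) + static field count (u2)
  // + static field data + instance field count (u2) + instance field descriptors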
1620   u4 size = 1 + sizeof(address) + 4 + 6 * sizeof(address) + 4 + 2 + 2 + static_size + 2 + instance_fields_size;
1621 
1622   writer->start_sub_record(HPROF_GC_CLASS_DUMP, size);
1623 
1624   // class ID
1625   writer->write_classID(ik);
1626   writer->write_u4(STACK_TRACE_ID);
1627 
1628   // super class ID
1629   InstanceKlass* java_super = ik->java_super();
1630   if (java_super == NULL) {
1631     writer->write_objectID(oop(NULL));
1632   } else {
1633     writer->write_classID(java_super);
1634   }
1635 
1636   writer->write_objectID(ik->class_loader());
1637   writer->write_objectID(ik->signers());
1638   writer->write_objectID(ik->protection_domain());
1639 
1640   // reserved
1641   writer->write_objectID(oop(NULL));
1642   writer->write_objectID(oop(NULL));
1643 
1644   // instance size
1645   writer->write_u4(DumperSupport::instance_size(ik));
1646 
1647   // size of constant pool - ignored by HAT 1.1
1648   writer->write_u2(0);
1649 
1650   // static fields
1651   writer->write_u2(static_fields_count);
1652   dump_static_fields(writer, ik);
1653 
1654   // description of instance fields
1655   writer->write_u2(instance_fields_count);
1656   dump_instance_field_descriptors(writer, ik);
1657 
1658   writer->end_sub_record();
1659 
1660   // array classes
1661   k = ik->array_klass_or_null();
1662   while (k != NULL) {
1663     assert(k->is_objArray_klass(), "not an ObjArrayKlass");
1664 
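    // array classes carry no fields, so the record is just the fixed-size header:
    // tag, class ID, stack trace serial number, 6 IDs, instance size (u4) and
    // three zero u2 counts (constant pool, static fields, instance fields)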
1665     u4 size = 1 + sizeof(address) + 4 + 6 * sizeof(address) + 4 + 2 + 2 + 2;
1666     writer->start_sub_record(HPROF_GC_CLASS_DUMP, size);
1667     writer->write_classID(k);
1668     writer->write_u4(STACK_TRACE_ID);
1669 
1670     // super class of array classes is java.lang.Object
1671     java_super = k->java_super();
1672     assert(java_super != NULL, "checking");
1673     writer->write_classID(java_super);
1674 
1675     writer->write_objectID(ik->class_loader());
1676     writer->write_objectID(ik->signers());
1677     writer->write_objectID(ik->protection_domain());
1678 
1679     writer->write_objectID(oop(NULL));    // reserved
1680     writer->write_objectID(oop(NULL));
1681     writer->write_u4(0);             // instance size
1682     writer->write_u2(0);             // constant pool
1683     writer->write_u2(0);             // static fields
1684     writer->write_u2(0);             // instance fields
1685 
1686     writer->end_sub_record();
1687 
1688     // get the array class for the next rank
1689     k = k->array_klass_or_null();
1690   }
1691 }
1692 
1693 // creates HPROF_GC_CLASS_DUMP record for a given primitive array
1694 // class (and each multi-dimensional array class too)
1695 void DumperSupport::dump_basic_type_array_class(DumpWriter* writer, Klass* k) {
1696   // array classes
1697   while (k != NULL) {
1698     Klass* klass = k;
1699 
1700     u4 size = 1 + sizeof(address) + 4 + 6 * sizeof(address) + 4 + 2 + 2 + 2;
1701     writer->start_sub_record(HPROF_GC_CLASS_DUMP, size);
1702     writer->write_classID(klass);
1703     writer->write_u4(STACK_TRACE_ID);
1704 
1705     // super class of array classes is java.lang.Object
1706     InstanceKlass* java_super = klass->java_super();
1707     assert(java_super != NULL, "checking");
1708     writer->write_classID(java_super);
1709 
1710     writer->write_objectID(oop(NULL));    // loader
1711     writer->write_objectID(oop(NULL));    // signers
1712     writer->write_objectID(oop(NULL));    // protection domain
1713 
1714     writer->write_objectID(oop(NULL));    // reserved
1715     writer->write_objectID(oop(NULL));
1716     writer->write_u4(0);             // instance size
1717     writer->write_u2(0);             // constant pool
1718     writer->write_u2(0);             // static fields
1719     writer->write_u2(0);             // instance fields
1720 
1721     writer->end_sub_record();
1722 
1723     // get the array class for the next rank
1724     k = klass->array_klass_or_null();
1725   }
1726 }
1727 
1728 // HPROF uses a u4 as the record length field,
1729 // which means we need to truncate arrays that are too long.
1730 int DumperSupport::calculate_array_max_length(DumpWriter* writer, arrayOop array, short header_size) {
1731   BasicType type = ArrayKlass::cast(array->klass())->element_type();
1732   assert(type >= T_BOOLEAN && type <= T_OBJECT, "invalid array element type");
1733 
1734   int length = array->length();
1735 
1736   int type_size;
1737   if (type == T_OBJECT) {
1738     type_size = sizeof(address);
1739   } else {
1740     type_size = type2aelembytes(type);
1741   }
1742 
1743   size_t length_in_bytes = (size_t)length * type_size;
1744   uint max_bytes = max_juint - header_size;
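  // the (u4) record length must cover the sub-record header as well as the
  // element data, so at most (max_juint - header_size) bytes are left for elements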
1745 
1746   if (length_in_bytes > max_bytes) {
1747     length = max_bytes / type_size;
1748     length_in_bytes = (size_t)length * type_size;
1749 
1750     warning("cannot dump array of type %s[] with length %d; truncating to length %d",
1751             type2name_tab[type], array->length(), length);
1752   }
1753   return length;
1754 }
1755 
1756 // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
1757 void DumperSupport::dump_object_array(DumpWriter* writer, objArrayOop array) {
1758   // sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID) + sizeof(classID)
1759   short header_size = 1 + 2 * 4 + 2 * sizeof(address);
1760   int length = calculate_array_max_length(writer, array, header_size);
1761   u4 size = header_size + length * sizeof(address);
1762 
1763   writer->start_sub_record(HPROF_GC_OBJ_ARRAY_DUMP, size);
1764   writer->write_objectID(array);
1765   writer->write_u4(STACK_TRACE_ID);
1766   writer->write_u4(length);
1767 
1768   // array class ID
1769   writer->write_classID(array->klass());
1770 
1771   // [id]* elements
1772   for (int index = 0; index < length; index++) {
1773     oop o = array->obj_at(index);
1774     if (o != NULL && log_is_enabled(Debug, cds, heap) && mask_dormant_archived_object(o) == NULL) {
1775       ResourceMark rm;
1776       log_debug(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s) referenced by " INTPTR_FORMAT " (%s)",
1777                            p2i(o), o->klass()->external_name(),
1778                            p2i(array), array->klass()->external_name());
1779     }
1780     o = mask_dormant_archived_object(o);
1781     writer->write_objectID(o);
1782   }
1783 
1784   writer->end_sub_record();
1785 }
1786 
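// Writes the elements one at a time so that the writer's write_u2/u4/u8
// primitives can emit them in HPROF's (big-endian) byte order; used when the
// host byte ordering differs from the Java ordering.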
1787 #define WRITE_ARRAY(Array, Type, Size, Length) \
1788   for (int i = 0; i < Length; i++) { writer->write_##Size((Size)Array->Type##_at(i)); }
1789 
1790 // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
1791 void DumperSupport::dump_prim_array(DumpWriter* writer, typeArrayOop array) {
1792   BasicType type = TypeArrayKlass::cast(array->klass())->element_type();
1793 
1794   // 2 * sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID)
1795   short header_size = 2 * 1 + 2 * 4 + sizeof(address);
1796 
1797   int length = calculate_array_max_length(writer, array, header_size);
1798   int type_size = type2aelembytes(type);
1799   u4 length_in_bytes = (u4)length * type_size;
1800   u4 size = header_size + length_in_bytes;
1801 
1802   writer->start_sub_record(HPROF_GC_PRIM_ARRAY_DUMP, size);
1803   writer->write_objectID(array);
1804   writer->write_u4(STACK_TRACE_ID);
1805   writer->write_u4(length);
1806   writer->write_u1(type2tag(type));
1807 
1808   // nothing to copy
1809   if (length == 0) {
1810     writer->end_sub_record();
1811     return;
1812   }
1813 
1814   // If the byte ordering is big endian then we can copy most types directly
1815 
1816   switch (type) {
1817     case T_INT : {
1818       if (Endian::is_Java_byte_ordering_different()) {
1819         WRITE_ARRAY(array, int, u4, length);
1820       } else {
1821         writer->write_raw((void*)(array->int_at_addr(0)), length_in_bytes);
1822       }
1823       break;
1824     }
1825     case T_BYTE : {
1826       writer->write_raw((void*)(array->byte_at_addr(0)), length_in_bytes);
1827       break;
1828     }
1829     case T_CHAR : {
1830       if (Endian::is_Java_byte_ordering_different()) {
1831         WRITE_ARRAY(array, char, u2, length);
1832       } else {
1833         writer->write_raw((void*)(array->char_at_addr(0)), length_in_bytes);
1834       }
1835       break;
1836     }
1837     case T_SHORT : {
1838       if (Endian::is_Java_byte_ordering_different()) {
1839         WRITE_ARRAY(array, short, u2, length);
1840       } else {
1841         writer->write_raw((void*)(array->short_at_addr(0)), length_in_bytes);
1842       }
1843       break;
1844     }
1845     case T_BOOLEAN : {
1846       if (Endian::is_Java_byte_ordering_different()) {
1847         WRITE_ARRAY(array, bool, u1, length);
1848       } else {
1849         writer->write_raw((void*)(array->bool_at_addr(0)), length_in_bytes);
1850       }
1851       break;
1852     }
1853     case T_LONG : {
1854       if (Endian::is_Java_byte_ordering_different()) {
1855         WRITE_ARRAY(array, long, u8, length);
1856       } else {
1857         writer->write_raw((void*)(array->long_at_addr(0)), length_in_bytes);
1858       }
1859       break;
1860     }
1861 
1862     // handle floats/doubles in a special way to ensure that NaNs are
1863     // written correctly. TODO: Check if we can avoid this on processors that
1864     // use IEEE 754.
1865 
1866     case T_FLOAT : {
1867       for (int i = 0; i < length; i++) {
1868         dump_float(writer, array->float_at(i));
1869       }
1870       break;
1871     }
1872     case T_DOUBLE : {
1873       for (int i = 0; i < length; i++) {
1874         dump_double(writer, array->double_at(i));
1875       }
1876       break;
1877     }
1878     default : ShouldNotReachHere();
1879   }
1880 
1881   writer->end_sub_record();
1882 }
1883 
1884 // create a HPROF_FRAME record of the given Method* and bci
1885 void DumperSupport::dump_stack_frame(DumpWriter* writer,
1886                                      int frame_serial_num,
1887                                      int class_serial_num,
1888                                      Method* m,
1889                                      int bci) {
1890   int line_number;
1891   if (m->is_native()) {
1892     line_number = -3;  // native frame
1893   } else {
1894     line_number = m->line_number_from_bci(bci);
1895   }
1896 
1897   write_header(writer, HPROF_FRAME, 4*oopSize + 2*sizeof(u4));
1898   writer->write_id(frame_serial_num);               // frame serial number
1899   writer->write_symbolID(m->name());                // method's name
1900   writer->write_symbolID(m->signature());           // method's signature
1901 
1902   assert(m->method_holder()->is_instance_klass(), "not InstanceKlass");
1903   writer->write_symbolID(m->method_holder()->source_file_name());  // source file name
1904   writer->write_u4(class_serial_num);               // class serial number
1905   writer->write_u4((u4) line_number);               // line number
1906 }
1907 
1908 
1909 // Support class used to generate HPROF_UTF8 records from the entries in the
1910 // SymbolTable.
1911 
1912 class SymbolTableDumper : public SymbolClosure {
1913  private:
1914   DumpWriter* _writer;
1915   DumpWriter* writer() const                { return _writer; }
1916  public:
1917   SymbolTableDumper(DumpWriter* writer)     { _writer = writer; }
1918   void do_symbol(Symbol** p);
1919 };
1920 
1921 void SymbolTableDumper::do_symbol(Symbol** p) {
1922   ResourceMark rm;
1923   Symbol* sym = load_symbol(p);
1924   int len = sym->utf8_length();
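  // zero-length symbols are skipped; for the rest the record body is the
  // symbol ID followed by the raw (unterminated) UTF-8 bytes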
1925   if (len > 0) {
1926     char* s = sym->as_utf8();
1927     DumperSupport::write_header(writer(), HPROF_UTF8, oopSize + len);
1928     writer()->write_symbolID(sym);
1929     writer()->write_raw(s, len);
1930   }
1931 }
1932 
1933 // Support class used to generate HPROF_GC_ROOT_JNI_LOCAL records
1934 
1935 class JNILocalsDumper : public OopClosure {
1936  private:
1937   DumpWriter* _writer;
1938   u4 _thread_serial_num;
1939   int _frame_num;
1940   DumpWriter* writer() const                { return _writer; }
1941  public:
1942   JNILocalsDumper(DumpWriter* writer, u4 thread_serial_num) {
1943     _writer = writer;
1944     _thread_serial_num = thread_serial_num;
1945     _frame_num = -1;  // default - empty stack
1946   }
1947   void set_frame_number(int n) { _frame_num = n; }
1948   void do_oop(oop* obj_p);
1949   void do_oop(narrowOop* obj_p) { ShouldNotReachHere(); }
1950 };
1951 
1952 
1953 void JNILocalsDumper::do_oop(oop* obj_p) {
1954   // ignore null handles
1955   oop o = *obj_p;
1956   if (o != NULL) {
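    // sub-record: tag (u1), object ID, thread serial number (u4), frame number (u4)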
1957     u4 size = 1 + sizeof(address) + 4 + 4;
1958     writer()->start_sub_record(HPROF_GC_ROOT_JNI_LOCAL, size);
1959     writer()->write_objectID(o);
1960     writer()->write_u4(_thread_serial_num);
1961     writer()->write_u4((u4)_frame_num);
1962     writer()->end_sub_record();
1963   }
1964 }
1965 
1966 
1967 // Support class used to generate HPROF_GC_ROOT_JNI_GLOBAL records
1968 
1969 class JNIGlobalsDumper : public OopClosure {
1970  private:
1971   DumpWriter* _writer;
1972   DumpWriter* writer() const                { return _writer; }
1973 
1974  public:
1975   JNIGlobalsDumper(DumpWriter* writer) {
1976     _writer = writer;
1977   }
1978   void do_oop(oop* obj_p);
1979   void do_oop(narrowOop* obj_p) { ShouldNotReachHere(); }
1980 };
1981 
1982 void JNIGlobalsDumper::do_oop(oop* obj_p) {
1983   oop o = *obj_p;
1984 
1985   // ignore these
1986   if (o == NULL) return;
1987 
1988   // we ignore global refs to symbols and other internal objects
1989   if (o->is_instance() || o->is_objArray() || o->is_typeArray()) {
1990     u4 size = 1 + 2 * sizeof(address);
1991     writer()->start_sub_record(HPROF_GC_ROOT_JNI_GLOBAL, size);
1992     writer()->write_objectID(o);
1993     writer()->write_objectID((oopDesc*)obj_p);      // global ref ID
1994     writer()->end_sub_record();
1995   }
1996 }
1997 
1998 
1999 // Support class used to generate HPROF_GC_ROOT_MONITOR_USED records
2000 
2001 class MonitorUsedDumper : public OopClosure {
2002  private:
2003   DumpWriter* _writer;
2004   DumpWriter* writer() const                { return _writer; }
2005  public:
2006   MonitorUsedDumper(DumpWriter* writer) {
2007     _writer = writer;
2008   }
2009   void do_oop(oop* obj_p) {
2010     u4 size = 1 + sizeof(address);
2011     writer()->start_sub_record(HPROF_GC_ROOT_MONITOR_USED, size);
2012     writer()->write_objectID(*obj_p);
2013     writer()->end_sub_record();
2014   }
2015   void do_oop(narrowOop* obj_p) { ShouldNotReachHere(); }
2016 };
2017 
2018 
2019 // Support class used to generate HPROF_GC_ROOT_STICKY_CLASS records
2020 
2021 class StickyClassDumper : public KlassClosure {
2022  private:
2023   DumpWriter* _writer;
2024   DumpWriter* writer() const                { return _writer; }
2025  public:
2026   StickyClassDumper(DumpWriter* writer) {
2027     _writer = writer;
2028   }
2029   void do_klass(Klass* k) {
2030     if (k->is_instance_klass()) {
2031       InstanceKlass* ik = InstanceKlass::cast(k);
2032       u4 size = 1 + sizeof(address);
2033       writer()->start_sub_record(HPROF_GC_ROOT_STICKY_CLASS, size);
2034       writer()->write_classID(ik);
2035       writer()->end_sub_record();
2036     }
2037   }
2038 };
2039 
2040 
2041 class VM_HeapDumper;
2042 
2043 // Support class used when iterating over the heap.
2044 
2045 class HeapObjectDumper : public ObjectClosure {
2046  private:
2047   VM_HeapDumper* _dumper;
2048   DumpWriter* _writer;
2049 
2050   VM_HeapDumper* dumper()               { return _dumper; }
2051   DumpWriter* writer()                  { return _writer; }
2052 
2053  public:
2054   HeapObjectDumper(VM_HeapDumper* dumper, DumpWriter* writer) {
2055     _dumper = dumper;
2056     _writer = writer;
2057   }
2058 
2059   // called for each object in the heap
2060   void do_object(oop o);
2061 };
2062 
2063 void HeapObjectDumper::do_object(oop o) {
2064   // skip classes as these are emitted as HPROF_GC_CLASS_DUMP records
2065   if (o->klass() == SystemDictionary::Class_klass()) {
2066     if (!java_lang_Class::is_primitive(o)) {
2067       return;
2068     }
2069   }
2070 
2071   if (DumperSupport::mask_dormant_archived_object(o) == NULL) {
2072     log_debug(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s)", p2i(o), o->klass()->external_name());
2073     return;
2074   }
2075 
2076   if (o->is_instance()) {
2077     // create a HPROF_GC_INSTANCE_DUMP record for each object
2078     DumperSupport::dump_instance(writer(), o);
2079   } else if (o->is_objArray()) {
2080     // create a HPROF_GC_OBJ_ARRAY_DUMP record for each object array
2081     DumperSupport::dump_object_array(writer(), objArrayOop(o));
2082   } else if (o->is_typeArray()) {
2083     // create a HPROF_GC_PRIM_ARRAY_DUMP record for each type array
2084     DumperSupport::dump_prim_array(writer(), typeArrayOop(o));
2085   }
2086 }
2087 
2088 // The VM operation that performs the heap dump
2089 class VM_HeapDumper : public VM_GC_Operation, public AbstractGangTask {
2090  private:
2091   static VM_HeapDumper* _global_dumper;
2092   static DumpWriter*    _global_writer;
2093   DumpWriter*           _local_writer;
2094   JavaThread*           _oome_thread;
2095   Method*               _oome_constructor;
2096   bool _gc_before_heap_dump;
2097   GrowableArray<Klass*>* _klass_map;
2098   ThreadStackTrace** _stack_traces;
2099   int _num_threads;
2100 
2101   // accessors and setters
2102   static VM_HeapDumper* dumper()         {  assert(_global_dumper != NULL, "Error"); return _global_dumper; }
2103   static DumpWriter* writer()            {  assert(_global_writer != NULL, "Error"); return _global_writer; }
2104   void set_global_dumper() {
2105     assert(_global_dumper == NULL, "Error");
2106     _global_dumper = this;
2107   }
2108   void set_global_writer() {
2109     assert(_global_writer == NULL, "Error");
2110     _global_writer = _local_writer;
2111   }
2112   void clear_global_dumper() { _global_dumper = NULL; }
2113   void clear_global_writer() { _global_writer = NULL; }
2114 
2115   bool skip_operation() const;
2116 
2117   // writes a HPROF_LOAD_CLASS record
2118   class ClassesDo;
2119   static void do_load_class(Klass* k);
2120 
2121   // writes a HPROF_GC_CLASS_DUMP record for the given class
2122   // (and each array class too)
2123   static void do_class_dump(Klass* k);
2124 
2125   // writes HPROF_GC_CLASS_DUMP records for a given basic type
2126   // array (and each multi-dimensional array too)
2127   static void do_basic_type_array_class_dump(Klass* k);
2128 
2129   // HPROF_GC_ROOT_THREAD_OBJ records
2130   int do_thread(JavaThread* thread, u4 thread_serial_num);
2131   void do_threads();
2132 
2133   void add_class_serial_number(Klass* k, int serial_num) {
2134     _klass_map->at_put_grow(serial_num, k);
2135   }
2136 
2137   // HPROF_TRACE and HPROF_FRAME records
2138   void dump_stack_traces();
2139 
2140  public:
2141   VM_HeapDumper(DumpWriter* writer, bool gc_before_heap_dump, bool oome) :
2142     VM_GC_Operation(0 /* total collections,      dummy, ignored */,
2143                     GCCause::_heap_dump /* GC Cause */,
2144                     0 /* total full collections, dummy, ignored */,
2145                     gc_before_heap_dump),
2146     AbstractGangTask("dump heap") {
2147     _local_writer = writer;
2148     _gc_before_heap_dump = gc_before_heap_dump;
2149     _klass_map = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Klass*>(INITIAL_CLASS_COUNT, true);
2150     _stack_traces = NULL;
2151     _num_threads = 0;
2152     if (oome) {
2153       assert(!Thread::current()->is_VM_thread(), "Dump from OutOfMemoryError cannot be called by the VMThread");
2154       // get OutOfMemoryError zero-parameter constructor
2155       InstanceKlass* oome_ik = SystemDictionary::OutOfMemoryError_klass();
2156       _oome_constructor = oome_ik->find_method(vmSymbols::object_initializer_name(),
2157                                                           vmSymbols::void_method_signature());
2158       // get thread throwing OOME when generating the heap dump at OOME
2159       _oome_thread = JavaThread::current();
2160     } else {
2161       _oome_thread = NULL;
2162       _oome_constructor = NULL;
2163     }
2164   }
2165   ~VM_HeapDumper() {
2166     if (_stack_traces != NULL) {
2167       for (int i=0; i < _num_threads; i++) {
2168         delete _stack_traces[i];
2169       }
2170       FREE_C_HEAP_ARRAY(ThreadStackTrace*, _stack_traces);
2171     }
2172     delete _klass_map;
2173   }
2174 
2175   VMOp_Type type() const { return VMOp_HeapDumper; }
2176   void doit();
2177   void work(uint worker_id);
2178 };
2179 
2180 
2181 VM_HeapDumper* VM_HeapDumper::_global_dumper = NULL;
2182 DumpWriter*    VM_HeapDumper::_global_writer = NULL;
2183 
2184 bool VM_HeapDumper::skip_operation() const {
2185   return false;
2186 }
2187 
2188 // fixes up the current dump record and writes HPROF_HEAP_DUMP_END record
2189 void DumperSupport::end_of_dump(DumpWriter* writer) {
2190   writer->finish_dump_segment();
2191 
2192   writer->write_u1(HPROF_HEAP_DUMP_END);
2193   writer->write_u4(0);
2194   writer->write_u4(0);
2195 }
2196 
2197 // writes a HPROF_LOAD_CLASS record for the class (and each of its
2198 // array classes)
2199 void VM_HeapDumper::do_load_class(Klass* k) {
2200   static u4 class_serial_num = 0;
2201 
2202   // len of HPROF_LOAD_CLASS record
2203   u4 remaining = 2*oopSize + 2*sizeof(u4);
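  // body: class serial number (u4), class object ID, stack trace serial
  // number (u4), class name symbol ID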
2204 
2205   // write a HPROF_LOAD_CLASS for the class and each array class
2206   do {
2207     DumperSupport::write_header(writer(), HPROF_LOAD_CLASS, remaining);
2208 
2209     // class serial number is just a number
2210     writer()->write_u4(++class_serial_num);
2211 
2212     // class ID
2213     Klass* klass = k;
2214     writer()->write_classID(klass);
2215 
2216     // add the Klass* and class serial number pair
2217     dumper()->add_class_serial_number(klass, class_serial_num);
2218 
2219     writer()->write_u4(STACK_TRACE_ID);
2220 
2221     // class name ID
2222     Symbol* name = klass->name();
2223     writer()->write_symbolID(name);
2224 
2225     // write a LOAD_CLASS record for the array type (if it exists)
2226     k = klass->array_klass_or_null();
2227   } while (k != NULL);
2228 }
2229 
2230 // writes a HPROF_GC_CLASS_DUMP record for the given class
2231 void VM_HeapDumper::do_class_dump(Klass* k) {
2232   if (k->is_instance_klass()) {
2233     DumperSupport::dump_class_and_array_classes(writer(), k);
2234   }
2235 }
2236 
2237 // writes HPROF_GC_CLASS_DUMP records for a given basic type
2238 // array (and each multi-dimensional array too)
2239 void VM_HeapDumper::do_basic_type_array_class_dump(Klass* k) {
2240   DumperSupport::dump_basic_type_array_class(writer(), k);
2241 }
2242 
2243 // Walk the stack of the given thread.
2244 // Dumps a HPROF_GC_ROOT_JAVA_FRAME record for each local
2245 // Dumps a HPROF_GC_ROOT_JNI_LOCAL record for each JNI local
2246 //
2247 // It returns the number of Java frames in this thread stack
2248 int VM_HeapDumper::do_thread(JavaThread* java_thread, u4 thread_serial_num) {
2249   JNILocalsDumper blk(writer(), thread_serial_num);
2250 
2251   oop threadObj = java_thread->threadObj();
2252   assert(threadObj != NULL, "sanity check");
2253 
2254   int stack_depth = 0;
2255   if (java_thread->has_last_Java_frame()) {
2256 
2257     // vframes are resource allocated
2258     Thread* current_thread = Thread::current();
2259     ResourceMark rm(current_thread);
2260     HandleMark hm(current_thread);
2261 
2262     RegisterMap reg_map(java_thread);
2263     frame f = java_thread->last_frame();
2264     vframe* vf = vframe::new_vframe(&f, &reg_map, java_thread);
2265     frame* last_entry_frame = NULL;
2266     int extra_frames = 0;
2267 
2268     if (java_thread == _oome_thread && _oome_constructor != NULL) {
2269       extra_frames++;
2270     }
2271     while (vf != NULL) {
2272       blk.set_frame_number(stack_depth);
2273       if (vf->is_java_frame()) {
2274 
2275         // java frame (interpreted, compiled, ...)
2276         javaVFrame *jvf = javaVFrame::cast(vf);
2277         if (!(jvf->method()->is_native())) {
2278           StackValueCollection* locals = jvf->locals();
2279           for (int slot=0; slot<locals->size(); slot++) {
2280             if (locals->at(slot)->type() == T_OBJECT) {
2281               oop o = locals->obj_at(slot)();
2282 
2283               if (o != NULL) {
2284                 u4 size = 1 + sizeof(address) + 4 + 4;
2285                 writer()->start_sub_record(HPROF_GC_ROOT_JAVA_FRAME, size);
2286                 writer()->write_objectID(o);
2287                 writer()->write_u4(thread_serial_num);
2288                 writer()->write_u4((u4) (stack_depth + extra_frames));
2289                 writer()->end_sub_record();
2290               }
2291             }
2292           }
2293           StackValueCollection *exprs = jvf->expressions();
2294           for(int index = 0; index < exprs->size(); index++) {
2295             if (exprs->at(index)->type() == T_OBJECT) {
2296                oop o = exprs->obj_at(index)();
2297                if (o != NULL) {
2298                  u4 size = 1 + sizeof(address) + 4 + 4;
2299                  writer()->start_sub_record(HPROF_GC_ROOT_JAVA_FRAME, size);
2300                  writer()->write_objectID(o);
2301                  writer()->write_u4(thread_serial_num);
2302                  writer()->write_u4((u4) (stack_depth + extra_frames));
2303                  writer()->end_sub_record();
2304                }
2305              }
2306           }
2307         } else {
2308           // native frame
2309           if (stack_depth == 0) {
2310             // JNI locals for the top frame.
2311             java_thread->active_handles()->oops_do(&blk);
2312           } else {
2313             if (last_entry_frame != NULL) {
2314               // JNI locals for the entry frame
2315               assert(last_entry_frame->is_entry_frame(), "checking");
2316               last_entry_frame->entry_frame_call_wrapper()->handles()->oops_do(&blk);
2317             }
2318           }
2319         }
2320         // increment only for Java frames
2321         stack_depth++;
2322         last_entry_frame = NULL;
2323 
2324       } else {
2325         // externalVFrame - if it's an entry frame then report any JNI locals
2326         // as roots when we find the corresponding native javaVFrame
2327         frame* fr = vf->frame_pointer();
2328         assert(fr != NULL, "sanity check");
2329         if (fr->is_entry_frame()) {
2330           last_entry_frame = fr;
2331         }
2332       }
2333       vf = vf->sender();
2334     }
2335   } else {
2336     // no last java frame but there may be JNI locals
2337     java_thread->active_handles()->oops_do(&blk);
2338   }
2339   return stack_depth;
2340 }
2341 
2342 
2343 // write a HPROF_GC_ROOT_THREAD_OBJ record for each java thread. Then walk
2344 // the stack so that locals and JNI locals are dumped.
2345 void VM_HeapDumper::do_threads() {
2346   for (int i=0; i < _num_threads; i++) {
2347     JavaThread* thread = _stack_traces[i]->thread();
2348     oop threadObj = thread->threadObj();
2349     u4 thread_serial_num = i+1;
2350     u4 stack_serial_num = thread_serial_num + STACK_TRACE_ID;
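    // thread serial numbers start at 1; the stack trace serial number is offset
    // by STACK_TRACE_ID so that it does not collide with the dummy trace written
    // in dump_stack_traces()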
2351     u4 size = 1 + sizeof(address) + 4 + 4;
2352     writer()->start_sub_record(HPROF_GC_ROOT_THREAD_OBJ, size);
2353     writer()->write_objectID(threadObj);
2354     writer()->write_u4(thread_serial_num);  // thread number
2355     writer()->write_u4(stack_serial_num);   // stack trace serial number
2356     writer()->end_sub_record();
2357     int num_frames = do_thread(thread, thread_serial_num);
2358     assert(num_frames == _stack_traces[i]->get_stack_depth(),
2359            "total number of Java frames not matched");
2360   }
2361 }
2362 
2363 
2364 // The VM operation that dumps the heap. The dump consists of the following
2365 // records:
2366 //
2367 //  HPROF_HEADER
2368 //  [HPROF_UTF8]*
2369 //  [HPROF_LOAD_CLASS]*
2370 //  [[HPROF_FRAME]*|HPROF_TRACE]*
2371 //  [HPROF_GC_CLASS_DUMP]*
2372 //  [HPROF_HEAP_DUMP_SEGMENT]*
2373 //  HPROF_HEAP_DUMP_END
2374 //
2375 // The HPROF_TRACE records represent the stack traces at the point the heap
2376 // dump is generated, plus a "dummy trace" record which does not include
2377 // any frames. The dummy trace record is referenced as the allocation site
2378 // for objects whose allocation site is unknown.
2379 //
2380 // Each HPROF_HEAP_DUMP_SEGMENT record has a length followed by sub-records.
2381 // To allow the heap dump to be generated in a single pass we remember the
2382 // position of the dump length and fix it up after all sub-records have been written.
2383 // To generate the sub-records we iterate over the heap, writing
2384 // HPROF_GC_INSTANCE_DUMP, HPROF_GC_OBJ_ARRAY_DUMP, and HPROF_GC_PRIM_ARRAY_DUMP
2385 // records as we go. Once that is done we write records for some of the GC
2386 // roots.
2387 
2388 void VM_HeapDumper::doit() {
2389 
2390   HandleMark hm;
2391   CollectedHeap* ch = Universe::heap();
2392 
2393   ch->ensure_parsability(false); // must happen, even if collection does
2394                                  // not happen (e.g. due to GCLocker)
2395 
2396   if (_gc_before_heap_dump) {
2397     if (GCLocker::is_active()) {
2398       warning("GC locker is held; pre-heapdump GC was skipped");
2399     } else {
2400       ch->collect_as_vm_thread(GCCause::_heap_dump);
2401     }
2402   }
2403 
2404   // At this point we should be the only dumper active, so
2405   // the following should be safe.
2406   set_global_dumper();
2407   set_global_writer();
2408 
2409   WorkGang* gang = UseShenandoahGC ? NULL : ch->get_safepoint_workers();
2410 
2411   if (gang == NULL) {
2412     work(0);
2413   } else {
2414     gang->run_task(this);
2415   }
2416 
2417   // Now we clear the global variables, so that a future dumper might run.
2418   clear_global_dumper();
2419   clear_global_writer();
2420 }
2421 
2422 void VM_HeapDumper::work(uint worker_id) {
2423   if (worker_id != 0) {
2424     writer()->writer_loop();
2425     return;
2426   }
2427 
2428   // Write the file header - we always use 1.0.2.
2429   const char* header = "JAVA PROFILE 1.0.2";
2430 
2431   // header is a few bytes long - no chance to overflow int
2432   writer()->write_raw((void*)header, (int)strlen(header));
2433   writer()->write_u1(0); // terminator
2434   writer()->write_u4(oopSize);
2435   // timestamp is current time in ms
2436   writer()->write_u8(os::javaTimeMillis());
2437 
2438   // HPROF_UTF8 records
2439   SymbolTableDumper sym_dumper(writer());
2440   SymbolTable::symbols_do(&sym_dumper);
2441 
2442   // write HPROF_LOAD_CLASS records
2443   {
2444     LockedClassesDo locked_load_classes(&do_load_class);
2445     ClassLoaderDataGraph::classes_do(&locked_load_classes);
2446   }
2447   Universe::basic_type_classes_do(&do_load_class);
2448 
2449   // write HPROF_FRAME and HPROF_TRACE records
2450   // this must be called after _klass_map is built by the class iteration above.
2451   dump_stack_traces();
2452 
2453   // Writes HPROF_GC_CLASS_DUMP records
2454   {
2455     LockedClassesDo locked_dump_class(&do_class_dump);
2456     ClassLoaderDataGraph::classes_do(&locked_dump_class);
2457   }
2458   Universe::basic_type_classes_do(&do_basic_type_array_class_dump);
2459 
2460   // writes HPROF_GC_INSTANCE_DUMP records.
2461   // After each sub-record is written check_segment_length will be invoked
2462   // to check if the current segment exceeds a threshold. If so, a new
2463   // segment is started.
2464   // The HPROF_GC_CLASS_DUMP and HPROF_GC_INSTANCE_DUMP are the vast bulk
2465   // of the heap dump.
2466   HeapObjectDumper obj_dumper(this, writer());
2467   Universe::heap()->object_iterate(&obj_dumper);
2468 
2469   // HPROF_GC_ROOT_THREAD_OBJ + frames + jni locals
2470   do_threads();
2471 
2472   // HPROF_GC_ROOT_MONITOR_USED
2473   MonitorUsedDumper mon_dumper(writer());
2474   ObjectSynchronizer::oops_do(&mon_dumper);
2475 
2476   // HPROF_GC_ROOT_JNI_GLOBAL
2477   JNIGlobalsDumper jni_dumper(writer());
2478   JNIHandles::oops_do(&jni_dumper);
2479   Universe::oops_do(&jni_dumper);  // technically not jni roots, but global roots
2480                                    // for things like preallocated throwable backtraces
2481 
2482   // HPROF_GC_ROOT_STICKY_CLASS
2483   // These should be classes in the NULL class loader data, and not all classes
2484   // if !ClassUnloading
2485   StickyClassDumper class_dumper(writer());
2486   ClassLoaderData::the_null_class_loader_data()->classes_do(&class_dumper);
2487 
2488   // Writes the HPROF_HEAP_DUMP_END record.
2489   DumperSupport::end_of_dump(writer());
2490 
2491   // We are done with writing. Release the worker threads.
2492   writer()->deactivate();
2493 }
2494 
2495 void VM_HeapDumper::dump_stack_traces() {
2496   // write a HPROF_TRACE record without any frames to be referenced as object alloc sites
2497   DumperSupport::write_header(writer(), HPROF_TRACE, 3*sizeof(u4));
2498   writer()->write_u4((u4) STACK_TRACE_ID);
2499   writer()->write_u4(0);                    // thread number
2500   writer()->write_u4(0);                    // frame count
2501 
2502   _stack_traces = NEW_C_HEAP_ARRAY(ThreadStackTrace*, Threads::number_of_threads(), mtInternal);
2503   int frame_serial_num = 0;
2504   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
2505     oop threadObj = thread->threadObj();
2506     if (threadObj != NULL && !thread->is_exiting() && !thread->is_hidden_from_external_view()) {
2507       // dump thread stack trace
2508       ResourceMark rm;
2509       ThreadStackTrace* stack_trace = new ThreadStackTrace(thread, false);
2510       stack_trace->dump_stack_at_safepoint(-1);
2511       _stack_traces[_num_threads++] = stack_trace;
2512 
2513       // write HPROF_FRAME records for this thread's stack trace
2514       int depth = stack_trace->get_stack_depth();
2515       int thread_frame_start = frame_serial_num;
2516       int extra_frames = 0;
2517       // write a fake frame that makes it look like the thread which caused the OOME
2518       // is in the OutOfMemoryError zero-parameter constructor
2519       if (thread == _oome_thread && _oome_constructor != NULL) {
2520         int oome_serial_num = _klass_map->find(_oome_constructor->method_holder());
2521         // the class serial number starts from 1
2522         assert(oome_serial_num > 0, "OutOfMemoryError class not found");
2523         DumperSupport::dump_stack_frame(writer(), ++frame_serial_num, oome_serial_num,
2524                                         _oome_constructor, 0);
2525         extra_frames++;
2526       }
2527       for (int j=0; j < depth; j++) {
2528         StackFrameInfo* frame = stack_trace->stack_frame_at(j);
2529         Method* m = frame->method();
2530         int class_serial_num = _klass_map->find(m->method_holder());
2531         // the class serial number starts from 1
2532         assert(class_serial_num > 0, "class not found");
2533         DumperSupport::dump_stack_frame(writer(), ++frame_serial_num, class_serial_num, m, frame->bci());
2534       }
2535       depth += extra_frames;
2536 
2537       // write HPROF_TRACE record for one thread
2538       DumperSupport::write_header(writer(), HPROF_TRACE, 3*sizeof(u4) + depth*oopSize);
2539       int stack_serial_num = _num_threads + STACK_TRACE_ID;
2540       writer()->write_u4(stack_serial_num);      // stack trace serial number
2541       writer()->write_u4((u4) _num_threads);     // thread serial number
2542       writer()->write_u4(depth);                 // frame count
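      // the frame IDs written below are the serial numbers assigned to this
      // thread's HPROF_FRAME records above (thread_frame_start + 1 .. + depth)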
2543       for (int j=1; j <= depth; j++) {
2544         writer()->write_id(thread_frame_start + j);
2545       }
2546     }
2547   }
2548 }
2549 
2550 // dump the heap to given path.
2551 int HeapDumper::dump(const char* path, outputStream* out, int compression) {
2552   assert(path != NULL && strlen(path) > 0, "path missing");
2553 
2554   // print message in interactive case
2555   if (out != NULL) {
2556     out->print_cr("Dumping heap to %s ...", path);
2557     timer()->start();
2558   }
2559 
2560   // create JFR event
2561   EventHeapDump event;
2562 
2563   AbstractCompressor* compressor = NULL;
2564 
2565   if (compression > 0) {
2566     compressor = new (std::nothrow) GZipCompressor(compression);
2567 
2568     if (compressor == NULL) {
2569       set_error("Could not allocate gzip compressor");
2570       return -1;
2571     }
2572   }
2573 
2574   DumpWriter writer(new (std::nothrow) FileWriter(path), compressor);
2575 
2576   if (writer.error() != NULL) {
2577     set_error(writer.error());
2578     if (out != NULL) {
2579       out->print_cr("Unable to create %s: %s", path,
2580         (error() != NULL) ? error() : "reason unknown");
2581     }
2582     return -1;
2583   }
2584 
2585   // generate the dump
2586   VM_HeapDumper dumper(&writer, _gc_before_heap_dump, _oome);
2587   if (Thread::current()->is_VM_thread()) {
2588     assert(SafepointSynchronize::is_at_safepoint(), "Expected to be called at a safepoint");
2589     dumper.doit();
2590   } else {
2591     VMThread::execute(&dumper);
2592   }
2593 
2594   // record any error that the writer may have encountered
2595   set_error(writer.error());
2596 
2597   // emit JFR event
2598   if (error() == NULL) {
2599     event.set_destination(path);
2600     event.set_gcBeforeDump(_gc_before_heap_dump);
2601     event.set_size(writer.bytes_written());
2602     event.set_onOutOfMemoryError(_oome);
2603     event.commit();
2604   }
2605 
2606   // print message in interactive case
2607   if (out != NULL) {
2608     timer()->stop();
2609     if (error() == NULL) {
2610       out->print_cr("Heap dump file created [" JULONG_FORMAT " bytes in %3.3f secs]",
2611                     writer.bytes_written(), timer()->seconds());
2612     } else {
2613       out->print_cr("Dump file is incomplete: %s", writer.error());
2614     }
2615   }
2616 
2617   return (writer.error() == NULL) ? 0 : -1;
2618 }
2619 
2620 // stop timer (if still active), and free any error string we might be holding
2621 HeapDumper::~HeapDumper() {
2622   if (timer()->is_active()) {
2623     timer()->stop();
2624   }
2625   set_error(NULL);
2626 }
2627 
2628 
2629 // returns the error string (resource allocated), or NULL
2630 char* HeapDumper::error_as_C_string() const {
2631   if (error() != NULL) {
2632     char* str = NEW_RESOURCE_ARRAY(char, strlen(error())+1);
2633     strcpy(str, error());
2634     return str;
2635   } else {
2636     return NULL;
2637   }
2638 }
2639 
2640 // set the error string
2641 void HeapDumper::set_error(char const* error) {
2642   if (_error != NULL) {
2643     os::free(_error);
2644   }
2645   if (error == NULL) {
2646     _error = NULL;
2647   } else {
2648     _error = os::strdup(error);
2649     assert(_error != NULL, "allocation failure");
2650   }
2651 }
2652 
2653 // Called by out-of-memory error reporting by a single Java thread
2654 // outside of a JVM safepoint
2655 void HeapDumper::dump_heap_from_oome() {
2656   HeapDumper::dump_heap(true);
2657 }
2658 
2659 // Called by error reporting by a single Java thread outside of a JVM safepoint,
2660 // or by heap dumping by the VM thread during a (GC) safepoint. Thus, these various
2661 // callers are strictly serialized and guaranteed not to interfere below. For more
2662 // general use, however, this method will need modification to prevent
2663 // interference when updating the static variables base_path and dump_file_seq below.
2664 void HeapDumper::dump_heap() {
2665   HeapDumper::dump_heap(false);
2666 }
2667 
2668 void HeapDumper::dump_heap(bool oome) {
2669   static char base_path[JVM_MAXPATHLEN] = {'\0'};
2670   static uint dump_file_seq = 0;
2671   char* my_path;
2672   const int max_digit_chars = 20;
2673 
2674   const char* dump_file_name = "java_pid";
2675   const char* dump_file_ext  = ".hprof";
2676 
2677   // The dump file defaults to java_pid<pid>.hprof in the current working
2678   // directory. HeapDumpPath=<file> can be used to specify an alternative
2679   // dump file name or a directory where dump file is created.
2680   if (dump_file_seq == 0) { // first time in, we initialize base_path
2681     // Calculate potentially longest base path and check if we have enough
2682     // allocated statically.
2683     const size_t total_length =
2684                       (HeapDumpPath == NULL ? 0 : strlen(HeapDumpPath)) +
2685                       strlen(os::file_separator()) + max_digit_chars +
2686                       strlen(dump_file_name) + strlen(dump_file_ext) + 1;
2687     if (total_length > sizeof(base_path)) {
2688       warning("Cannot create heap dump file.  HeapDumpPath is too long.");
2689       return;
2690     }
2691 
2692     bool use_default_filename = true;
2693     if (HeapDumpPath == NULL || HeapDumpPath[0] == '\0') {
2694       // HeapDumpPath=<file> not specified
2695     } else {
2696       strcpy(base_path, HeapDumpPath);
2697       // check if the path is a directory (must exist)
2698       DIR* dir = os::opendir(base_path);
2699       if (dir == NULL) {
2700         use_default_filename = false;
2701       } else {
2702         // HeapDumpPath specified a directory. We append a file separator
2703         // (if needed).
2704         os::closedir(dir);
2705         size_t fs_len = strlen(os::file_separator());
2706         if (strlen(base_path) >= fs_len) {
2707           char* end = base_path;
2708           end += (strlen(base_path) - fs_len);
2709           if (strcmp(end, os::file_separator()) != 0) {
2710             strcat(base_path, os::file_separator());
2711           }
2712         }
2713       }
2714     }
2715     // If HeapDumpPath wasn't a file name then we append the default name
2716     if (use_default_filename) {
2717       const size_t dlen = strlen(base_path);  // if heap dump dir specified
2718       jio_snprintf(&base_path[dlen], sizeof(base_path)-dlen, "%s%d%s",
2719                    dump_file_name, os::current_process_id(), dump_file_ext);
2720     }
2721     const size_t len = strlen(base_path) + 1;
2722     my_path = (char*)os::malloc(len, mtInternal);
2723     if (my_path == NULL) {
2724       warning("Cannot create heap dump file.  Out of system memory.");
2725       return;
2726     }
2727     strncpy(my_path, base_path, len);
2728   } else {
2729     // Append a sequence number id for dumps following the first
2730     const size_t len = strlen(base_path) + max_digit_chars + 2; // for '.' and \0
2731     my_path = (char*)os::malloc(len, mtInternal);
2732     if (my_path == NULL) {
2733       warning("Cannot create heap dump file.  Out of system memory.");
2734       return;
2735     }
2736     jio_snprintf(my_path, len, "%s.%d", base_path, dump_file_seq);
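    // e.g. with the default name, the second dump of process 1234 is written
    // to java_pid1234.hprof.1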
2737   }
2738   dump_file_seq++;   // increment seq number for next time we dump
2739 
2740   HeapDumper dumper(false /* no GC before heap dump */,
2741                     oome  /* pass along out-of-memory-error flag */);
2742   dumper.dump(my_path, tty);
2743   os::free(my_path);
2744 }