/*
 * Copyright (c) 2014, 2015, Dynatrace and/or its affiliates. All rights reserved.
 *
 * This file is part of the Lock Contention Tracing Subsystem for the HotSpot
 * Virtual Machine, which is developed at Christian Doppler Laboratory on
 * Monitoring and Evolution of Very-Large-Scale Software Systems. Please
 * contact us at <http://mevss.jku.at/> if you need additional information
 * or have any questions.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work. If not, see <http://www.gnu.org/licenses/>.
 *
 */

#include "evtrace/traceEvents.hpp"

#include "evtrace/traceWriter.hpp"
#include "evtrace/traceManager.hpp"
#include "evtrace/traceMetadata.hpp"

#include "runtime/vframe.hpp"

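// These enum ranges are serialized as single bytes (u1) in the trace
// stream, so their maximum values must fit into UINT8_MAX.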
void TraceEvents::initialize() {
  assert(_event_max <= UINT8_MAX, "event type does not fit");
  assert(_park_return_code_max <= UINT8_MAX, "park return code does not fit");
  assert(_safepoint_reason_max <= UINT8_MAX, "safepoint reason does not fit");
  assert(_monitor_enter_wait_max <= UINT8_MAX, "monitor wait code does not fit");
  assert(_monitor_entered_flags_max <= UINT8_MAX, "monitor entered flags do not fit");
}

inline bool TraceEvents::can_write() {
  return TraceManager::is_initialized();
}

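// Inline helpers that map VM entities (threads, objects, monitors, class
// loaders, methods) to the stable identifiers used in the trace; they all
// delegate to the global TraceMetadata instance.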
inline TraceTypes::timestamp TraceEvents::time_now() {
  return TraceManager::metadata()->time_now();
}

inline TraceTypes::thread_id TraceEvents::thread_id_for(Thread* t) {
  return TraceManager::metadata()->thread_id(t);
}

inline TraceTypes::object_id TraceEvents::object_id_for(oop obj) {
  return TraceManager::metadata()->object_id(obj);
}

inline TraceTypes::objmonitor_id TraceEvents::objmonitor_id_for(ObjectMonitor* om) {
  return TraceManager::metadata()->objmonitor_id(om);
}

inline TraceTypes::object_id TraceEvents::objmonitor_object_id_for(ObjectMonitor* om) {
  return TraceManager::metadata()->objmonitor_object_id(om);
}

inline TraceTypes::classloader_id TraceEvents::classloader_id_for(ClassLoaderData* cld) {
  return TraceManager::metadata()->classloader_id(cld);
}

inline TraceTypes::method_id TraceEvents::method_id_for(Method* m) {
  bool added;
  method_id mid = TraceManager::metadata()->method_id(m, added);
  assert(!added, "must have been known");
  return mid;
}

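// Each write_*() function below sizes a TraceWriter for the exact payload
// of one event and then emits the fields in order. All of them return
// early when the tracing subsystem is not (or no longer) initialized.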
void TraceEvents::write_thread_start() {
  if (!can_write())
    return;

  Thread *t = Thread::current();
  assert(t != NULL, "thread not attached");

  ResourceMark rm;
  const char *name = NULL;
  if (t->is_Java_thread()) {
    name = ((JavaThread *) t)->get_thread_name();
  } else if (t->is_Named_thread()) {
    name = ((NamedThread *) t)->name();
  }

  const size_t name_nbytes = TraceWriter::nbytes_for_utf8str(name, 64);
  TraceWriter wr(sizeof(event_type) + sizeof(timestamp) + name_nbytes);
  wr.put_event_type(event_thread_start);
  wr.put_timestamp(time_now());
  wr.put_utf8str(name, name_nbytes);
}

void TraceEvents::write_thread_name_change(Thread *t) {
  assert(t != NULL, "null thread");

  if (!can_write())
    return;

  ResourceMark rm;
  const char *name = NULL;
  if (t->is_Java_thread()) {
    name = ((JavaThread *) t)->get_thread_name();
  }

  const size_t name_nbytes = TraceWriter::nbytes_for_utf8str(name, 64);
  TraceWriter wr(sizeof(event_type) + sizeof(timestamp) + sizeof(thread_id) + name_nbytes);
  wr.put_event_type(event_thread_name_change);
  wr.put_timestamp(time_now());
  wr.put_thread_id(thread_id_for(t));
  wr.put_utf8str(name, name_nbytes);
}

void TraceEvents::write_thread_state_change(Thread *t) {
  assert(t != NULL, "null thread");

  if (!can_write())
    return;

  TraceWriter wr(sizeof(event_type) + sizeof(timestamp) + sizeof(thread_id) + sizeof(thread_state));
  wr.put_event_type(event_thread_state_change);
  wr.put_timestamp(time_now());
  wr.put_thread_id(thread_id_for(t));
  wr.put_thread_state(TraceManager::metadata()->thread_state(t));
}

void TraceEvents::write_thread_interrupt(Thread *t) {
  assert(t != NULL, "null thread");

  if (!can_write())
    return;

  TraceWriter wr(sizeof(event_type) + sizeof(timestamp) + sizeof(seq_num) + sizeof(thread_id));
  wr.put_event_type(event_thread_interrupt);
  wr.put_timestamp(time_now());
  wr.put_seq_num(TraceManager::metadata()->next_global_seq());
  wr.put_thread_id(thread_id_for(t));
}

void TraceEvents::write_thread_exit() {
  if (!can_write())
    return;

  TraceWriter wr(sizeof(event_type) + sizeof(timestamp));
  wr.put_event_type(event_thread_exit);
  wr.put_timestamp(time_now());
}

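// Records the beginning of a park operation. The event captures the park
// blocker (if any), the current stack, the thread's nesting level (clamped
// to one byte), and a fresh global sequence number that is also remembered
// on the thread so a matching unpark can later refer to it.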
void TraceEvents::write_thread_park_begin(JavaThread *t, bool is_absolute, timestamp park_time) {
  assert(t != NULL, "null thread");

  if (!can_write())
    return;

  Klass* klass = NULL;
  object_id oid = 0;
  {
    oop blocker = t->current_park_blocker();
    if (blocker != NULL) {
      klass = blocker->klass();
      oid = object_id_for(blocker);
      assert(oid != 0, "just checking");
    }
  } // blocker oop may not be valid anymore after object_id_for()

  // recheck necessary, because object_id_for() can cause a safepoint check
  if (!can_write())
    return;

  class_id cid = 0;
  if (klass != NULL) {
    cid = retrieve_class_id_or_write_metadata(klass);
    assert(cid != 0, "just checking");
  }

  assert(t->has_last_Java_frame(), "must have");
  stack_id stid = retrieve_stack_id_or_write_metadata(t);

  int nesting_level = t->nesting_level();
  if (nesting_level > UINT8_MAX) {
    nesting_level = UINT8_MAX;
  }

  No_Safepoint_Verifier nsv(true, false);
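  // From here on no safepoint may occur, so claiming the sequence number
  // and writing the event happen without the thread blocking in between,
  // which presumably keeps park/unpark sequence ordering consistent.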
  seq_num seq = TraceManager::metadata()->next_global_seq();
  t->set_park_last_global_seq(seq);

  TraceWriter wr(sizeof(event_type) + sizeof(timestamp) + sizeof(seq_num) + sizeof(object_id) + sizeof(class_id) + sizeof(stack_id) + sizeof(u1) + sizeof(u1) + sizeof(timestamp));
  wr.put_event_type(event_thread_park_begin);
  wr.put_timestamp(time_now());
  wr.put_seq_num(seq);
  wr.put_object_id(oid);
  wr.put_class_id(cid);
  wr.put_stack_id(stid);
  wr.put_u1(nesting_level);
  wr.put_u1(is_absolute);
  wr.put_timestamp(park_time);
}

void TraceEvents::write_thread_park_end(Thread *t, seq_num seq, seq_num unpark_seq, park_return_code return_code) {
  assert(t != NULL, "null thread");
  assert(return_code > _park_return_code_min && return_code < _park_return_code_max, "invalid return_code");

  if (!can_write())
    return;

  TraceWriter wr(sizeof(event_type) + sizeof(timestamp) + sizeof(seq_num) + sizeof(seq_num) + sizeof(u1));
  wr.put_event_type(event_thread_park_end);
  wr.put_timestamp(time_now());
  wr.put_seq_num(seq);
  wr.put_seq_num(unpark_seq);
  wr.put_u1(return_code);
}

void TraceEvents::write_thread_unpark(thread_id thread, seq_num seq, seq_num chained_seq) {
  if (!can_write())
    return;

  stack_id stid = 0;
  Thread *t = Thread::current();
  if (t->is_Java_thread()) {
    JavaThread *jt = (JavaThread*) t;
    assert(jt->has_last_Java_frame(), "must have");
    stid = retrieve_stack_id_or_write_metadata(jt);
  }

  TraceWriter wr(sizeof(event_type) + sizeof(timestamp) + sizeof(seq_num) + sizeof(seq_num) + sizeof(thread_id) + sizeof(stack_id));
  wr.put_event_type(event_thread_unpark);
  wr.put_timestamp(time_now());
  wr.put_seq_num(seq);
  wr.put_seq_num(chained_seq);
  wr.put_thread_id(thread);
  wr.put_stack_id(stid);
}

void TraceEvents::write_monitor_inflate(ObjectMonitor* m, seq_num seq) {
  if (!can_write())
    return;

  class_id cid = retrieve_class_id_or_write_metadata(((oop)m->object())->klass());

  TraceWriter wr(sizeof(event_type) + sizeof(timestamp) + sizeof(seq_num) + sizeof(objmonitor_id) + sizeof(object_id) + sizeof(class_id));
  wr.put_event_type(event_monitor_inflate);
  wr.put_timestamp(time_now());
  wr.put_seq_num(seq);
  wr.put_objmonitor_id(objmonitor_id_for(m));
  wr.put_object_id(objmonitor_object_id_for(m));
  wr.put_class_id(cid);
}

void TraceEvents::write_monitor_deflate(ObjectMonitor* m) {
  if (!can_write())
    return;

  TraceWriter wr(sizeof(event_type) + sizeof(timestamp) + sizeof(seq_num) + sizeof(objmonitor_id));
  wr.put_event_type(event_monitor_deflate);
  wr.put_timestamp(time_now());
  wr.put_seq_num(m->next_trace_seq());
  wr.put_objmonitor_id(objmonitor_id_for(m));
}

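// Metadata ids are assigned lazily: the retrieve_*_or_write_metadata()
// functions ask TraceMetadata for an id and, when the entity is seen for
// the first time, immediately write a metadata event describing it, so
// every id is defined in the trace before the first event that uses it.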
TraceTypes::class_id TraceEvents::retrieve_class_id_or_write_metadata(Klass *klass) {
  assert(can_write(), "caller's responsibility");

  bool is_new;
  class_id id = TraceManager::metadata()->class_id(klass, is_new);
  if (is_new) {
    ResourceMark rm;
    const char *name = klass->name()->as_utf8();

    const size_t name_nbytes = TraceWriter::nbytes_for_utf8str(name, 256);
    TraceWriter wr(sizeof(event_type) + sizeof(class_id) + sizeof(classloader_id) + name_nbytes);
    wr.put_event_type(event_class_metadata);
    wr.put_class_id(id);
    wr.put_classloader_id(classloader_id_for(klass->class_loader_data()));
    wr.put_utf8str(name, name_nbytes);
  }
  return id;
}

TraceTypes::method_id TraceEvents::retrieve_method_id_or_write_metadata(Method *method) {
  assert(can_write(), "caller's responsibility");

  bool is_new;
  method_id id = TraceManager::metadata()->method_id(method, is_new);
  if (is_new) {
    class_id holder_id = retrieve_class_id_or_write_metadata(method->method_holder());

    ResourceMark rm;
    const char *name = method->name()->as_utf8(),
               *sig  = method->signature()->as_utf8();

    const size_t name_nbytes = TraceWriter::nbytes_for_utf8str(name, 256),
                 sig_nbytes  = TraceWriter::nbytes_for_utf8str(sig, 1024);
    TraceWriter wr(sizeof(event_type) + sizeof(method_id) + sizeof(class_id) + name_nbytes + sig_nbytes);
    wr.put_event_type(event_method_metadata);
    wr.put_method_id(id);
    wr.put_class_id(holder_id);
    wr.put_utf8str(name, name_nbytes);
    wr.put_utf8str(sig, sig_nbytes);
  }
  return id;
}

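// Resolves (or creates) the stack trace id for the current stack of t.
//
// Walking a deep stack for every event is expensive, so a "memento" frame
// serves as a shortcut: on a full walk, the frame at depth
// EventTracingStackMementoFrame is memento-marked (by patching its return
// address, judging by memento_original_return_address()), and the walked
// stack is cached. As long as that frame has not returned, a later walk
// only needs to descend to the marked frame and splices the cached frames
// in below it.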
TraceTypes::stack_id TraceEvents::retrieve_stack_id_or_write_metadata(JavaThread *t, stack_id preallocated_id) {
  class StatsUpdate : StackObj {
  public:
    bool     truncated;
    intptr_t frames, reused_memento_frames;
    StatsUpdate() : truncated(false), frames(0), reused_memento_frames(0) {}
    ~StatsUpdate() {
      TraceManager::update_stack_trace_stats(truncated, frames, reused_memento_frames);
    }
  };

  assert(can_write(), "caller's responsibility");

  if (!EnableEventTracingStackTraces) {
    assert(preallocated_id == 0, "no preallocation allowed");
    return (TraceTypes::stack_id) 0;
  }

  StatsUpdate stats;

  bool have_memento = (t->memento_original_return_address() != NULL);
  const CachedTraceStack *memento_stack = NULL;
  if (have_memento) {
    // we have patched a stack frame earlier and it has not returned yet
    memento_stack = t->memento_stack_trace();
    // NOTE: after metadata has been purged, the memento stack trace is NULL (safe to ignore)
  }

  TraceStackBuilder sb;
  bool new_memento = false;

  RegisterMap rm(t, false);
  frame fr = t->last_frame();
  int index = -1;
  while (!fr.is_first_frame() && !sb.is_full()) {
    if ((fr.cb() != NULL && fr.cb()->is_nmethod()) || fr.is_interpreted_frame()) {
      index++;
      sb.add_frame(&fr);

      if (memento_stack != NULL) {
        // if the memento stack trace is truncated, we can only use it once we
        // have walked at least as many frames as lie above the patched frame,
        // or we would be missing frames at the bottom
        if (!memento_stack->is_truncated() || index >= EventTracingStackMementoFrame) {
          if (fr.is_memento_marked(t)) {
            break;
          }
        }
      } else if (!have_memento && index == EventTracingStackMementoFrame) {
        fr.memento_mark(t);
        new_memento = true;
      }
    }
    fr = fr.sender(&rm);
  }

  if (!fr.is_first_frame() && sb.is_full()) {
    sb.set_truncated();
  }

  CompositeTraceStack cps(sb);

  if (memento_stack != NULL && !fr.is_first_frame()) {
    // reached memento frame

#ifdef ASSERT
    // another stack walk for validation
    TraceStackBuilder sb0;
    RegisterMap rm0(t, false);
    frame fr0 = t->last_frame();
    while (!fr0.is_first_frame() && !sb0.is_full()) {
      if (fr0.is_interpreted_frame() || fr0.is_compiled_frame() || fr0.is_native_frame()) {
        sb0.add_frame(&fr0);
      }
      fr0 = fr0.sender(&rm0);
    }
    if (!fr0.is_first_frame()) {
      assert(sb0.is_full(), "incomplete stack walk");
      sb0.set_truncated();
    }
    CompositeTraceStack cps0(sb0);
#endif

    if (index == EventTracingStackMementoFrame && sb.range_equals(0, memento_stack, 0, index + 1)) {
      // top frames are equal, exact match
      assert(cps0.equals(memento_stack), "sanity");
      stats.frames = memento_stack->count();
      stats.reused_memento_frames = stats.frames - sb.count(); // still had to walk the top frames
      stats.truncated = memento_stack->is_truncated();
      return memento_stack->id();
    }

    // complete with frames from cache
    cps.set_bottom(memento_stack, EventTracingStackMementoFrame + 1);
    stats.reused_memento_frames = cps.count() - sb.count();

    assert(cps.equals(cps0), "sanity");
  }

  stats.frames = cps.count();
  stats.truncated = cps.is_truncated();

  bool is_known;
  const CachedTraceStack *cached = TraceManager::metadata()->get_or_try_add_stack(cps, is_known, preallocated_id);
  TraceTypes::stack_id id;
  if (cached != NULL) {
    id = cached->id();
    assert(is_known || preallocated_id == 0 || cached->id() == preallocated_id, "sanity");
  } else {
    // insert failed (full)
    id = preallocated_id;
    if (id == 0) {
      // allocate a one-time id
      // FIXME: wasteful, since get_or_try_add_stack() likely also allocated an id and threw it away
      id = TraceManager::metadata()->next_stack_id();
    }
  }
  if (!is_known) {
    write_stack_metadata(id, cps);
  } else if (preallocated_id != 0) {
    write_identical_stacks_metadata(preallocated_id, cached->id());
  }
  if (new_memento) {
    t->set_memento_stack_trace(cached);
  }
  return id;
}

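// Writes the metadata event for a stack trace. The frames are iterated
// twice: the first pass makes the holder classes and methods known (so
// their metadata events precede this one) and counts the frames to size
// the writer; the second pass emits the (method id, bci) pairs.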
void TraceEvents::write_stack_metadata(stack_id id, const CompositeTraceStack &ts) {
  assert(can_write(), "caller's responsibility");

  // TODO: with heavy inlining, stack traces can become so large
  // that they no longer fit in a single trace buffer

  size_t count = 0;

  // make methods known
  TraceStackVframeIterator it(ts);
  while (it.has_next()) {
    it.next();
    retrieve_class_id_or_write_metadata(it.method()->method_holder());
    retrieve_method_id_or_write_metadata(it.method());
    count++;
  }
  it.reset();

  TraceWriter wr(sizeof(event_type) + sizeof(stack_id) + sizeof(u1) + sizeof(u2) + count * (sizeof(method_id) + sizeof(method_bci)));
  wr.put_event_type(event_stack_metadata);
  wr.put_stack_id(id);
  wr.put_u1(ts.is_truncated() ? 0 : 1); // 1 = complete, 0 = truncated
  assert(count == (u2) count, "must fit");
  wr.put_u2(count);
  while (it.has_next()) {
    it.next();
    wr.put_method_id(method_id_for(it.method()));
    wr.put_method_bci(it.bci());
  }
}

void TraceEvents::write_identical_stacks_metadata(stack_id id, stack_id known) {
  assert(can_write(), "caller's responsibility");

  TraceWriter wr(sizeof(event_type) + sizeof(stack_id) + sizeof(stack_id));
  wr.put_event_type(event_identical_stacks_metadata);
  wr.put_stack_id(id);
  wr.put_stack_id(known);
}

void TraceEvents::write_class_loader_unload(ClassLoaderData* cld) {
  if (!can_write())
    return;

  TraceWriter wr(sizeof(event_type) + sizeof(classloader_id));
  wr.put_event_type(event_class_loader_unload);
  wr.put_classloader_id(classloader_id_for(cld));
}

void TraceEvents::write_monitor_contended_enter(ObjectMonitor *m, monitor_enter_wait wait) {
  if (!can_write())
    return;

  stack_id stid = 0;
  if (JavaThread::current()->has_last_Java_frame()) {
    stid = retrieve_stack_id_or_write_metadata(JavaThread::current());
  }

  TraceWriter wr(sizeof(event_type) + sizeof(timestamp) + sizeof(seq_num) + sizeof(objmonitor_id) + sizeof(stack_id) + sizeof(u1));
  wr.put_event_type(event_monitor_contended_enter);
  wr.put_timestamp(time_now());
  wr.put_seq_num(m->next_trace_seq());
  wr.put_objmonitor_id(objmonitor_id_for(m));
  wr.put_stack_id(stid);
  wr.put_u1(wait);
}

void TraceEvents::write_monitor_contended_entered(ObjectMonitor *m, monitor_entered_flags flags) {
  if (!can_write())
    return;

  TraceWriter wr(sizeof(event_type) + sizeof(timestamp) + sizeof(seq_num) + sizeof(objmonitor_id) + sizeof(u1));
  wr.put_event_type(event_monitor_contended_entered);
  wr.put_timestamp(time_now());
  wr.put_seq_num(m->next_trace_seq());
  wr.put_objmonitor_id(objmonitor_id_for(m));
  wr.put_u1(flags);
}

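// Writes the exit event for a contended monitor. A caller may have
// preallocated a stack id while it was still safe to walk the stack; if
// the stack cannot be walked here, that id is aliased to the unknown
// stack (id 0) via an identical-stacks metadata event rather than being
// left dangling.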
void TraceEvents::write_monitor_contended_exited(ObjectMonitor *m, seq_num seq, stack_id preallocated_stack_id, bool resolve_stack) {
  if (!can_write())
    return;

  TraceTypes::stack_id stid = preallocated_stack_id;
  if (resolve_stack) {
    if (JavaThread::current()->has_last_Java_frame()) {
      stid = retrieve_stack_id_or_write_metadata(JavaThread::current(), preallocated_stack_id);
    } else if (preallocated_stack_id != 0) {
      // we preallocated a stack id, so we must resolve it to the unknown stack
      write_identical_stacks_metadata(preallocated_stack_id, 0);
    }
  }

  TraceWriter wr(sizeof(event_type) + sizeof(timestamp) + sizeof(seq_num) + sizeof(objmonitor_id) + sizeof(stack_id));
  wr.put_event_type(event_monitor_contended_exited);
  wr.put_timestamp(time_now());
  wr.put_seq_num(seq);
  wr.put_objmonitor_id(objmonitor_id_for(m));
  wr.put_stack_id(stid);
}

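// A dummy event carries only a sequence number and the monitor id;
// presumably it is written when a claimed sequence number will not be
// used for a real event, so that per-monitor sequences remain gapless.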
void TraceEvents::write_monitor_dummy(ObjectMonitor *m, seq_num seq) {
  if (!can_write())
    return;

  TraceWriter wr(sizeof(event_type) + sizeof(seq_num) + sizeof(objmonitor_id));
  wr.put_event_type(event_monitor_dummy);
  wr.put_seq_num(seq);
  wr.put_objmonitor_id(objmonitor_id_for(m));
}

void TraceEvents::write_safepoint_begin(safepoint_reason reason) {
  assert(reason > _safepoint_reason_min && reason < _safepoint_reason_max, "invalid reason");

  if (!can_write())
    return;

  TraceWriter wr(sizeof(event_type) + sizeof(timestamp) + sizeof(u1));
  wr.put_event_type(event_safepoint_begin);
  wr.put_timestamp(time_now());
  wr.put_u1((u1) reason);
}

void TraceEvents::write_safepoint_end(u4 vmops_processed) {
  if (!can_write())
    return;

  if (vmops_processed > UINT8_MAX)
    vmops_processed = UINT8_MAX;

  TraceWriter wr(sizeof(event_type) + sizeof(timestamp) + sizeof(u1));
  wr.put_event_type(event_safepoint_end);
  wr.put_timestamp(time_now());
  wr.put_u1((u1) vmops_processed);
}

void TraceEvents::write_vm_end() {
  if (!can_write())
    return;

  TraceWriter wr(sizeof(event_type) + sizeof(timestamp));
  wr.put_event_type(event_vm_end);
  wr.put_timestamp(time_now());
}

void TraceEvents::write_metadata_reset() {
  if (!can_write())
    return;

  TraceWriter wr(sizeof(event_type));
  wr.put_event_type(event_metadata_reset);
}

void TraceEvents::write_group(JavaThread* t, seq_num park_global_seq_begin_ref, oop source) {
  assert(t != NULL, "null thread");
  assert(source != NULL, "null source");

  if (!can_write())
    return;

  Klass* klass = source->klass();
  object_id oid = object_id_for(source);
  source = NULL; // source oop may not be valid anymore after object_id_for()
  assert(oid != 0, "just checking");

  // recheck necessary, because object_id_for() can cause a safepoint check
  if (!can_write())
    return;

  class_id cid = retrieve_class_id_or_write_metadata(klass);
  assert(cid != 0, "just checking");

  TraceWriter wr(sizeof(event_type) + sizeof(timestamp) + sizeof(seq_num) + sizeof(seq_num) + sizeof(object_id) + sizeof(class_id));
  wr.put_event_type(event_group);
  wr.put_timestamp(time_now());
  wr.put_seq_num(TraceManager::metadata()->next_global_seq());
  wr.put_seq_num(park_global_seq_begin_ref);
  wr.put_object_id(oid);
  wr.put_class_id(cid);
}

void TraceEvents::write_marker(const char *label) {
  if (!can_write())
    return;

  const size_t label_nbytes = TraceWriter::nbytes_for_utf8str(label, 64);

  TraceWriter wr(sizeof(event_type) + sizeof(timestamp) + label_nbytes);
  wr.put_event_type(event_marker);
  wr.put_timestamp(time_now());
  wr.put_utf8str(label, label_nbytes);
}

void TraceEventThreadParkEnd::do_write() {
  TraceEvents::write_thread_park_end(_thread, _seq, _unpark_seq, _return_code);
}

void TraceEventThreadUnpark::do_write() {
  assert(_chained_seq != -1, "chained_seq not set");
  TraceEvents::write_thread_unpark(_thread_id, _seq, _chained_seq);
}