src/share/vm/runtime/vmThread.cpp

*** 34,43 ****
--- 34,44 ----
  #include "runtime/thread.inline.hpp"
  #include "runtime/vmThread.hpp"
  #include "runtime/vm_operations.hpp"
  #include "services/runtimeService.hpp"
  #include "trace/tracing.hpp"
+ #include "evtrace/traceEvents.hpp"
  #include "utilities/dtrace.hpp"
  #include "utilities/events.hpp"
  #include "utilities/xmlstream.hpp"
  #ifndef USDT2
*** 270,279 ****
--- 271,284 ----
    // Note that I cannot call os::set_priority because it expects Java
    // priorities and I am *explicitly* using OS priorities so that it's
    // possible to set the VM thread priority higher than any Java thread.
    os::set_native_priority( this, prio );
  
+   if (EnableEventTracing) {
+     TraceEvents::write_thread_start();
+   }
+ 
    // Wait for VM_Operations until termination
    this->loop();
  
    // Note the intention to exit before safepointing.
    // 6295565 This has the effect of waiting for any large tty
*** 456,470 ****
--- 461,484 ----
                                Mutex::_no_safepoint_check_flag);
            // Force a safepoint since we have not had one for at least
            // 'GuaranteedSafepointInterval' milliseconds. This will run all
            // the clean-up processing that needs to be done regularly at a
            // safepoint
+ 
+           if (EnableEventTracing) {
+             TraceEvents::write_safepoint_begin(TraceTypes::safepoint_periodic);
+           }
+ 
            SafepointSynchronize::begin();
            #ifdef ASSERT
              if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
            #endif
            SafepointSynchronize::end();
+ 
+           if (EnableEventTracing) {
+             TraceEvents::write_safepoint_end(0);
+           }
          }
          _cur_vm_operation = _vm_queue->remove_next();
  
          // If we are at a safepoint we will evaluate all the operations that
          // follow that also require a safepoint
*** 496,505 ****
--- 510,524 ----
        // follow that also require a safepoint
        if (_cur_vm_operation->evaluate_at_safepoint()) {
  
          _vm_queue->set_drain_list(safepoint_ops); // ensure ops can be scanned
  
+         if (EnableEventTracing) {
+           TraceEvents::write_safepoint_begin(TraceTypes::safepoint_for_vm_op);
+         }
+ 
+         int vm_ops_evaluated = 1;
          SafepointSynchronize::begin();
          evaluate_operation(_cur_vm_operation);
          // now process all queued safepoint ops, iteratively draining
          // the queue until there are none left
          do {
*** 513,522 ****
--- 532,542 ----
              evaluate_operation(_cur_vm_operation);
              _cur_vm_operation = next;
              if (PrintSafepointStatistics) {
                SafepointSynchronize::inc_vmop_coalesced_count();
              }
+             vm_ops_evaluated++;
            } while (_cur_vm_operation != NULL);
          }
          // There is a chance that a thread enqueued a safepoint op
          // since we released the op-queue lock and initiated the safepoint.
          // So we drain the queue again if there is anything there, as an
*** 540,549 ****
--- 560,573 ----
          _vm_queue->set_drain_list(NULL);
  
          // Complete safepoint synchronization
          SafepointSynchronize::end();
  
+         if (EnableEventTracing) {
+           TraceEvents::write_safepoint_end((u4) vm_ops_evaluated);
+         }
+ 
        } else {  // not a safepoint operation
          if (TraceLongCompiles) {
            elapsedTimer t;
            t.start();
            evaluate_operation(_cur_vm_operation);
*** 576,588 ****
--- 600,620 ----
      //
      if (SafepointALot || SafepointSynchronize::is_cleanup_needed()) {
        long interval          = SafepointSynchronize::last_non_safepoint_interval();
        bool max_time_exceeded = GuaranteedSafepointInterval != 0 && (interval > GuaranteedSafepointInterval);
        if (SafepointALot || max_time_exceeded) {
+         if (EnableEventTracing) {
+           TraceEvents::write_safepoint_begin(TraceTypes::safepoint_periodic);
+         }
+ 
          HandleMark hm(VMThread::vm_thread());
          SafepointSynchronize::begin();
          SafepointSynchronize::end();
+ 
+         if (EnableEventTracing) {
+           TraceEvents::write_safepoint_end(0);
+         }
        }
      }
    }
  }
*** 665,677 ****
--- 697,717 ----
      // Release all internal handles after operation is evaluated
      HandleMark hm(t);
      _cur_vm_operation = op;
  
      if (op->evaluate_at_safepoint() && !SafepointSynchronize::is_at_safepoint()) {
+       if (EnableEventTracing) {
+         TraceEvents::write_safepoint_begin(TraceTypes::safepoint_for_vm_op);
+       }
+ 
        SafepointSynchronize::begin();
        op->evaluate();
        SafepointSynchronize::end();
+ 
+       if (EnableEventTracing) {
+         TraceEvents::write_safepoint_end(1);
+       }
      } else {
        op->evaluate();
      }
  
      // Free memory if needed
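Every instrumentation site in this change follows the same shape: check EnableEventTracing, emit a begin event tagged with why the safepoint is being taken (periodic versus driven by a VM operation), bring the VM to a safepoint, do the work, leave the safepoint, then emit an end event carrying how many VM operations were evaluated inside it. Below is a minimal, standalone sketch of that shape for illustration only; the stub declarations (safepoint_begin, drain_vm_op_queue, the simplified flag and event writers) are placeholders, not the real HotSpot APIs touched by this patch.

    // Standalone sketch of the guard-and-wrap pattern used at each safepoint site.
    // All declarations here are simplified stand-ins for the HotSpot originals.
    #include <cstdio>

    typedef unsigned int u4;

    static bool EnableEventTracing = true;   // real flag is a HotSpot -XX option

    namespace TraceTypes {
      enum SafepointReason { safepoint_periodic, safepoint_for_vm_op };
    }

    namespace TraceEvents {
      void write_safepoint_begin(TraceTypes::SafepointReason reason) {
        std::printf("safepoint begin, reason=%d\n", reason);
      }
      void write_safepoint_end(u4 vm_ops_evaluated) {
        std::printf("safepoint end, vm ops evaluated=%u\n", vm_ops_evaluated);
      }
    }

    // Stand-ins for SafepointSynchronize and the VM operation queue.
    static void safepoint_begin() {}
    static void safepoint_end()   {}
    static u4   drain_vm_op_queue() { return 3; /* pretend three ops were coalesced */ }

    int main() {
      // Same shape as the safepoint_for_vm_op sites in vmThread.cpp:
      if (EnableEventTracing) {
        TraceEvents::write_safepoint_begin(TraceTypes::safepoint_for_vm_op);
      }

      safepoint_begin();
      u4 vm_ops_evaluated = drain_vm_op_queue();
      safepoint_end();

      if (EnableEventTracing) {
        TraceEvents::write_safepoint_end(vm_ops_evaluated);
      }
      return 0;
    }

In the patch itself the count passed to write_safepoint_end is 0 for periodic safepoints that evaluate no operations, 1 for a single operation evaluated directly by the VM thread, and the accumulated vm_ops_evaluated for the coalescing loop.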