src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp (old version)

 315   return emergency_dump_path != NULL ? open_exclusivly(emergency_dump_path) : invalid_fd;
 316 }
 317 
 318 const char* JfrEmergencyDump::build_dump_path(const char* repository_path) {
 319   return repository_path == NULL ? create_emergency_dump_path() : create_emergency_chunk_path(repository_path);
 320 }
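
// For illustration (hypothetical caller, not part of this file): a NULL
// repository_path selects the default emergency dump location, while a non-NULL
// path places the dump as a chunk inside that repository:
//
//   const char* dump  = JfrEmergencyDump::build_dump_path(NULL);      // default location
//   const char* chunk = JfrEmergencyDump::build_dump_path(repo_path); // chunk in repository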
 321 
 322 void JfrEmergencyDump::on_vm_error(const char* repository_path) {
 323   assert(repository_path != NULL, "invariant");
 324   ResourceMark rm;
 325   const fio_fd emergency_fd = emergency_dump_file_descriptor();
 326   if (emergency_fd != invalid_fd) {
 327     RepositoryIterator iterator(repository_path, strlen(repository_path));
 328     write_emergency_file(emergency_fd, iterator);
 329     os::close(emergency_fd);
 330   }
 331 }
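
// The error path above is deliberately simple: open the dump file exclusively,
// walk the repository's chunk files with RepositoryIterator, append whatever can
// still be read, and close the descriptor. A minimal sketch of that "concatenate
// whatever is still reachable" pattern, assuming plain POSIX I/O instead of the
// os::/fio_fd abstractions used here (illustration only):

#include <fcntl.h>
#include <unistd.h>

static void concat_chunks(int out_fd, const char** chunks, size_t n) {
  char buf[4096];
  for (size_t i = 0; i < n; i++) {
    const int in_fd = open(chunks[i], O_RDONLY);
    if (in_fd < 0) {
      continue; // skip unreadable chunks; keep dumping the rest
    }
    ssize_t nread;
    while ((nread = read(in_fd, buf, sizeof(buf))) > 0) {
      write(out_fd, buf, (size_t)nread); // best effort on the error path
    }
    close(in_fd);
  }
}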
 332 
 333 /*
 334 * We are just about to exit the VM, so we will be very aggressive
 335 * at this point in order to increase the overall success rate of dumping JFR data:
 336 *
 337 * 1. if the thread state is not "_thread_in_vm", we will quickly transition
 338 *    it to "_thread_in_vm".
 339 * 2. if the thread owns any critical lock(s), unlock them.
 340 *
 341 * If we end up deadlocking while attempting to dump JFR data,
 342 * we rely on the WatcherThread task "is_error_reported()"
 343 * to exit the VM after a hard-coded timeout (hence the WatcherThread itself is not allowed to perform the emergency dump).
 344 * This "safety net" somewhat explains the aggressiveness of this attempt.
 345 *
 346 */
 347 static bool prepare_for_emergency_dump() {
 348   if (JfrStream_lock->owned_by_self()) {
 349     // crashed during jfr rotation, disallow recursion
 350     return false;
 351   }
 352   Thread* const thread = Thread::current();
 353   if (thread->is_Watcher_thread()) {
 354     // need WatcherThread as a safeguard against potential deadlocks
 355     return false;
 356   }
 357 
 358   if (thread->is_Java_thread()) {
 359     ((JavaThread*)thread)->set_thread_state(_thread_in_vm);
 360   }
 361 
 362 #ifdef ASSERT
 363   Mutex* owned_lock = thread->owned_locks();
 364   while (owned_lock != NULL) {
 365     Mutex* next = owned_lock->next();
 366     owned_lock->unlock();
 367     owned_lock = next;
 368   }
 369 #endif // ASSERT
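
// Note: the exhaustive walk above is ASSERT-only because Thread::owned_locks()
// is bookkeeping that HotSpot maintains only in debug builds; product builds
// fall back to the explicit per-lock checks that follow.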
 370 
 371   if (Threads_lock->owned_by_self()) {
 372     Threads_lock->unlock();
 373   }
 374 
 375   if (Module_lock->owned_by_self()) {
 376     Module_lock->unlock();
 377   }
 378 
 379   if (ClassLoaderDataGraph_lock->owned_by_self()) {


 411   if (JfrMsg_lock->owned_by_self()) {
 412     JfrMsg_lock->unlock();
 413   }
 414 
 415   if (JfrBuffer_lock->owned_by_self()) {
 416     JfrBuffer_lock->unlock();
 417   }
 418 
 419   if (JfrStacktrace_lock->owned_by_self()) {
 420     JfrStacktrace_lock->unlock();
 421   }
 422   return true;
 423 }
 424 
 425 static volatile int jfr_shutdown_lock = 0;
 426 
 427 static bool guard_reentrancy() {
 428   return Atomic::cmpxchg(1, &jfr_shutdown_lock, 0) == 0;
 429 }
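
// The cmpxchg above acts as a one-shot latch: only the first caller observes the
// 0 -> 1 transition and gets true; every later or concurrent caller sees 1 and
// gets false, so at most one thread ever enters the shutdown dump. The same idea
// in standalone standard C++ (a sketch for illustration, not HotSpot code):

#include <atomic>

static std::atomic<int> shutdown_latch(0);

static bool guard_reentrancy_sketch() {
  int expected = 0;
  // succeeds exactly once; all subsequent calls fail because the latch is already 1
  return shutdown_latch.compare_exchange_strong(expected, 1);
}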
 430 
 431 void JfrEmergencyDump::on_vm_shutdown(bool exception_handler) {
 432   if (!(guard_reentrancy() && prepare_for_emergency_dump())) {
 433     return;
 434   }
 435   EventDumpReason event;
 436   if (event.should_commit()) {
 437     event.set_reason(exception_handler ? "Crash" : "Out of Memory");
 438     event.set_recordingId(-1);
 439     event.commit();
 440   }
 441   if (!exception_handler) {
 442     // OOM
 443     LeakProfiler::emit_events(max_jlong, false);
 444   }
 445   const int messages = MSGBIT(MSG_VM_ERROR);
 446   JfrRecorderService service;
 447   service.rotate(messages);
 448 }

src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp (new version)

 315   return emergency_dump_path != NULL ? open_exclusivly(emergency_dump_path) : invalid_fd;
 316 }
 317 
 318 const char* JfrEmergencyDump::build_dump_path(const char* repository_path) {
 319   return repository_path == NULL ? create_emergency_dump_path() : create_emergency_chunk_path(repository_path);
 320 }
 321 
 322 void JfrEmergencyDump::on_vm_error(const char* repository_path) {
 323   assert(repository_path != NULL, "invariant");
 324   ResourceMark rm;
 325   const fio_fd emergency_fd = emergency_dump_file_descriptor();
 326   if (emergency_fd != invalid_fd) {
 327     RepositoryIterator iterator(repository_path, strlen(repository_path));
 328     write_emergency_file(emergency_fd, iterator);
 329     os::close(emergency_fd);
 330   }
 331 }
 332 
 333 /*
 334 * We are just about to exit the VM, so we will be very aggressive
 335 * at this point in order to increase the overall success rate of dumping JFR data.
 336 *
 337 * If we end up deadlocking while attempting to dump JFR data,
 338 * we rely on the WatcherThread task "is_error_reported()"
 339 * to exit the VM after a hard-coded timeout (hence the WatcherThread itself is not allowed to perform the emergency dump).
 340 * This "safety net" somewhat explains the aggressiveness of this attempt.
 341 *
 342 */
 343 static bool prepare_for_emergency_dump(Thread* thread) {
 344   assert(thread != NULL, "invariant");
 345 
 346   if (thread->is_Watcher_thread()) {
 347     // need WatcherThread as a safeguard against potential deadlocks
 348     return false;
 349   }
 350   if (JfrStream_lock->owned_by_self()) {
 351     // crashed during jfr rotation, disallow recursion
 352     return false;
 353   }
 354 
 355 #ifdef ASSERT
 356   Mutex* owned_lock = thread->owned_locks();
 357   while (owned_lock != NULL) {
 358     Mutex* next = owned_lock->next();
 359     owned_lock->unlock();
 360     owned_lock = next;
 361   }
 362 #endif // ASSERT
 363 
 364   if (Threads_lock->owned_by_self()) {
 365     Threads_lock->unlock();
 366   }
 367 
 368   if (Module_lock->owned_by_self()) {
 369     Module_lock->unlock();
 370   }
 371 
 372   if (ClassLoaderDataGraph_lock->owned_by_self()) {


 404   if (JfrMsg_lock->owned_by_self()) {
 405     JfrMsg_lock->unlock();
 406   }
 407 
 408   if (JfrBuffer_lock->owned_by_self()) {
 409     JfrBuffer_lock->unlock();
 410   }
 411 
 412   if (JfrStacktrace_lock->owned_by_self()) {
 413     JfrStacktrace_lock->unlock();
 414   }
 415   return true;
 416 }
 417 
 418 static volatile int jfr_shutdown_lock = 0;
 419 
 420 static bool guard_reentrancy() {
 421   return Atomic::cmpxchg(1, &jfr_shutdown_lock, 0) == 0;
 422 }
 423 
 424 class JavaThreadInVM : public StackObj {
 425  private:
 426   JavaThread* const _jt;
 427   JavaThreadState _original_state;
 428  public:
 429 
 430   JavaThreadInVM(Thread* t) : _jt(t->is_Java_thread() ? (JavaThread*)t : NULL),
 431                               _original_state(_thread_max_state) {
 432     if ((_jt != NULL) && (_jt->thread_state() != _thread_in_vm)) {
 433       _original_state = _jt->thread_state();
 434       _jt->set_thread_state(_thread_in_vm);
 435     }
 436   }
 437 
 438   ~JavaThreadInVM() {
 439     if (_original_state != _thread_max_state) {
 440       _jt->set_thread_state(_original_state);
 441     }
 442   }
 443 
 444 };
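
// The RAII helper above uses _thread_max_state as a "no transition happened"
// sentinel, so the destructor only restores a state it actually saved; non-Java
// threads and Java threads already in _thread_in_vm pass through untouched.
// Sketch of the intended scoping (mirrors on_vm_shutdown() below):
//
//   {
//     JavaThreadInVM jtivm(thread); // transitions to _thread_in_vm if needed
//     // ... work that must run in the _thread_in_vm state ...
//   }                               // original state restored on scope exit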
 445 
 446 void JfrEmergencyDump::on_vm_shutdown(bool exception_handler) {
 447   if (!guard_reentrancy()) {
 448     return;
 449   }
 450 
 451   Thread* thread = Thread::current_or_null_safe();
 452   if (thread == NULL) {
 453     return;
 454   }
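
// Thread::current_or_null_safe() can return NULL here: the shutdown hook may run
// on a thread that was never attached to the VM (for example, a native thread
// reporting a crash), in which case there is no thread state to transition and
// the dump is skipped.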
 455   // Ensure a JavaThread is _thread_in_vm when we make this call
 456   JavaThreadInVM jtivm(thread);
 457   if (!prepare_for_emergency_dump(thread)) {
 458     return;
 459   }
 460 
 461   EventDumpReason event;
 462   if (event.should_commit()) {
 463     event.set_reason(exception_handler ? "Crash" : "Out of Memory");
 464     event.set_recordingId(-1);
 465     event.commit();
 466   }
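
// The recordingId of -1 presumably marks the dump as not tied to any particular
// recording; the DumpReason event itself distinguishes the crash and OOM paths
// via the reason string set above.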
 467   if (!exception_handler) {
 468     // OOM
 469     LeakProfiler::emit_events(max_jlong, false);
 470   }
 471   const int messages = MSGBIT(MSG_VM_ERROR);
 472   JfrRecorderService service;
 473   service.rotate(messages);
 474 }
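
// MSGBIT(MSG_VM_ERROR) selects the VM-error bit for the recorder's post box
// (in JFR, MSGBIT(e) expands to (1 << e)), so this rotation runs under the
// error-time policy rather than as a normal scheduled rotation.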