src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp

 315   return emergency_dump_path != NULL ? open_exclusivly(emergency_dump_path) : invalid_fd;
 316 }
 317 
 318 const char* JfrEmergencyDump::build_dump_path(const char* repository_path) {
 319   return repository_path == NULL ? create_emergency_dump_path() : create_emergency_chunk_path(repository_path);
 320 }
 321 
 322 void JfrEmergencyDump::on_vm_error(const char* repository_path) {
 323   assert(repository_path != NULL, "invariant");
 324   ResourceMark rm;
 325   const fio_fd emergency_fd = emergency_dump_file_descriptor();
 326   if (emergency_fd != invalid_fd) {
 327     RepositoryIterator iterator(repository_path, strlen(repository_path));
 328     write_emergency_file(emergency_fd, iterator);
 329     os::close(emergency_fd);
 330   }
 331 }
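
Neither RepositoryIterator nor write_emergency_file appear in this hunk; conceptually they walk the repository's chunk files and stream them into the single file behind emergency_fd. A minimal standalone sketch of that idea, with std::filesystem standing in for the HotSpot os/iterator code (all names below are illustrative assumptions, not HotSpot API):

    #include <filesystem>
    #include <fstream>

    // Append every chunk file (*.jfr) found in the repository directory to
    // one dump file. Note: directory iteration order is unspecified here,
    // whereas the real iterator hands back chunks in a well-defined order.
    static bool write_emergency_dump(const std::filesystem::path& repository,
                                     const std::filesystem::path& dump_file) {
      std::ofstream out(dump_file, std::ios::binary);
      if (!out) {
        return false;
      }
      for (const auto& entry : std::filesystem::directory_iterator(repository)) {
        if (entry.path().extension() == ".jfr") {
          std::ifstream in(entry.path(), std::ios::binary);
          if (in) {
            out << in.rdbuf();  // stream the whole chunk
          }
        }
      }
      return static_cast<bool>(out);
    }
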
 332 
 333 /*
 334 * We are just about to exit the VM, so we will be very aggressive
 335 * at this point in order to increase the overall chance of dumping the jfr data:
 336 *
 337 * 1. if the thread state is not "_thread_in_vm", we quickly transition
 338 *    it to "_thread_in_vm".
 339 * 2. if the thread owns critical lock(s), we unlock them.
 340 *
 341 * If we end up deadlocking while attempting to dump the jfr data,
 342 * we rely on the WatcherThread task "is_error_reported()"
 343 * to exit the VM after a hard-coded timeout (hence the WatcherThread itself must not do the emergency dump).
 344 * This "safety net" somewhat explains the aggressiveness of this attempt.
 345 *
 346 */
 347 static bool prepare_for_emergency_dump() {
 348   if (JfrStream_lock->owned_by_self()) {
 349     // crashed during jfr rotation, disallow recursion
 350     return false;
 351   }
 352   Thread* const thread = Thread::current();
 353   if (thread->is_Watcher_thread()) {
 354     // need WatcherThread as a safeguard against potential deadlocks
 355     return false;
 356   }
 357 
 358   if (thread->is_Java_thread()) {
 359     ((JavaThread*)thread)->set_thread_state(_thread_in_vm);
 360   }
 361 
 362 #ifdef ASSERT
 363   Mutex* owned_lock = thread->owned_locks();
 364   while (owned_lock != NULL) {
 365     Mutex* next = owned_lock->next();
 366     owned_lock->unlock();
 367     owned_lock = next;
 368   }
 369 #endif // ASSERT
 370 
 371   if (Threads_lock->owned_by_self()) {
 372     Threads_lock->unlock();
 373   }
 374 
 375   if (Module_lock->owned_by_self()) {
 376     Module_lock->unlock();
 377   }
 378 
 379   if (ClassLoaderDataGraph_lock->owned_by_self()) {
 380     ClassLoaderDataGraph_lock->unlock();
 381   }


 411   if (JfrMsg_lock->owned_by_self()) {
 412     JfrMsg_lock->unlock();
 413   }
 414 
 415   if (JfrBuffer_lock->owned_by_self()) {
 416     JfrBuffer_lock->unlock();
 417   }
 418 
 419   if (JfrStacktrace_lock->owned_by_self()) {
 420     JfrStacktrace_lock->unlock();
 421   }
 422   return true;
 423 }
 424 
 425 static volatile int jfr_shutdown_lock = 0;
 426 
 427 static bool guard_reentrancy() {
 428   return Atomic::cmpxchg(1, &jfr_shutdown_lock, 0) == 0;
 429 }
 430 
 431 void JfrEmergencyDump::on_vm_shutdown(bool exception_handler) {
 432   if (!(guard_reentrancy() && prepare_for_emergency_dump())) {
 433     return;
 434   }
 435   EventDumpReason event;
 436   if (event.should_commit()) {
 437     event.set_reason(exception_handler ? "Crash" : "Out of Memory");
 438     event.set_recordingId(-1);
 439     event.commit();
 440   }
 441   if (!exception_handler) {
 442     // OOM
 443     LeakProfiler::emit_events(max_jlong, false);
 444   }
 445   const int messages = MSGBIT(MSG_VM_ERROR);
 446   JfrRecorderService service;
 447   service.rotate(messages);
 448 }

src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp (new version)

 315   return emergency_dump_path != NULL ? open_exclusivly(emergency_dump_path) : invalid_fd;
 316 }
 317 
 318 const char* JfrEmergencyDump::build_dump_path(const char* repository_path) {
 319   return repository_path == NULL ? create_emergency_dump_path() : create_emergency_chunk_path(repository_path);
 320 }
 321 
 322 void JfrEmergencyDump::on_vm_error(const char* repository_path) {
 323   assert(repository_path != NULL, "invariant");
 324   ResourceMark rm;
 325   const fio_fd emergency_fd = emergency_dump_file_descriptor();
 326   if (emergency_fd != invalid_fd) {
 327     RepositoryIterator iterator(repository_path, strlen(repository_path));
 328     write_emergency_file(emergency_fd, iterator);
 329     os::close(emergency_fd);
 330   }
 331 }
 332 
 333 /*
 334 * We are just about to exit the VM, so we will be very aggressive
 335 * at this point in order to increase the overall chance of dumping the jfr data.
 336 *
 337 * If we end up deadlocking while attempting to dump the jfr data,
 338 * we rely on the WatcherThread task "is_error_reported()"
 339 * to exit the VM after a hard-coded timeout (hence the WatcherThread itself must not do the emergency dump).
 340 * This "safety net" somewhat explains the aggressiveness of this attempt.
 341 *
 342 */
 343 static bool prepare_for_emergency_dump() {
 344   Thread* const thread = Thread::current_or_null_safe();
 345   assert(thread != NULL, "invariant");
 346 
 347   if (JfrStream_lock->owned_by_self()) {
 348     // crashed during jfr rotation, disallow recursion
 349     return false;
 350   }

 351   if (thread->is_Watcher_thread()) {
 352     // need WatcherThread as a safeguard against potential deadlocks
 353     return false;
 354   }
 355 
 356 #ifdef ASSERT
 357   Mutex* owned_lock = thread->owned_locks();
 358   while (owned_lock != NULL) {
 359     Mutex* next = owned_lock->next();
 360     owned_lock->unlock();
 361     owned_lock = next;
 362   }
 363 #endif // ASSERT
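
The #ifdef ASSERT loop above releases every lock on the thread's owned-locks chain, and it must read next() before calling unlock(), because in debug builds unlock() may unlink the mutex from that very chain. The same traversal pattern in a generic, self-contained form:

    // Walk an intrusive singly-linked list, releasing each node: capture
    // 'next' before the release, since releasing may unlink the node.
    struct Node {
      Node* next;
    };

    static void release_all(Node* head, void (*release)(Node*)) {
      while (head != nullptr) {
        Node* const next = head->next;  // save before it becomes unreachable
        release(head);
        head = next;
      }
    }
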
 364 
 365   if (Threads_lock->owned_by_self()) {
 366     Threads_lock->unlock();
 367   }
 368 
 369   if (Module_lock->owned_by_self()) {
 370     Module_lock->unlock();
 371   }
 372 
 373   if (ClassLoaderDataGraph_lock->owned_by_self()) {
 374     ClassLoaderDataGraph_lock->unlock();
 375   }


 405   if (JfrMsg_lock->owned_by_self()) {
 406     JfrMsg_lock->unlock();
 407   }
 408 
 409   if (JfrBuffer_lock->owned_by_self()) {
 410     JfrBuffer_lock->unlock();
 411   }
 412 
 413   if (JfrStacktrace_lock->owned_by_self()) {
 414     JfrStacktrace_lock->unlock();
 415   }
 416   return true;
 417 }
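
prepare_for_emergency_dump boils down to "release anything this thread might hold that the dump path will need". HotSpot's Mutex records its owner, which is what owned_by_self() queries; std::mutex does not, so a standalone sketch needs a small wrapper (all names here are hypothetical, and the owner tracking is simplified rather than fully thread-safe):

    #include <mutex>
    #include <thread>

    // std::mutex with explicit owner tracking, approximating the
    // owned_by_self() query that HotSpot's Mutex provides natively.
    class OwnedMutex {
      std::mutex _mutex;
      std::thread::id _owner;
     public:
      void lock()   { _mutex.lock(); _owner = std::this_thread::get_id(); }
      void unlock() { _owner = std::thread::id(); _mutex.unlock(); }
      bool owned_by_self() const { return _owner == std::this_thread::get_id(); }
    };

    // The shape of each block above: drop the lock only if we hold it,
    // so the dump path cannot deadlock against our own thread.
    static void unlock_if_held(OwnedMutex& m) {
      if (m.owned_by_self()) {
        m.unlock();
      }
    }
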
 418 
 419 static volatile int jfr_shutdown_lock = 0;
 420 
 421 static bool guard_reentrancy() {
 422   return Atomic::cmpxchg(1, &jfr_shutdown_lock, 0) == 0;
 423 }
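
guard_reentrancy is a one-shot latch: the first caller flips jfr_shutdown_lock from 0 to 1 and proceeds; anyone arriving later, or concurrently on a second crashing thread, loses the race and backs out. The same latch expressed with std::atomic:

    #include <atomic>

    static std::atomic<int> shutdown_guard{0};

    // Returns true exactly once per process: the flag is never reset, so
    // only the first successful compare-and-swap wins.
    static bool guard_reentrancy_sketch() {
      int expected = 0;
      return shutdown_guard.compare_exchange_strong(expected, 1);
    }
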
 424 
 425 class JavaThreadInVM : public StackObj {
 426  private:
 427   JavaThread* _jt;
 428   JavaThreadState _original_state;
 429 
 430  public:
 431 
 432   JavaThreadInVM(Thread* thread) : _jt(NULL),
 433                                    _original_state(_thread_max_state) {
 434     if ((thread != NULL) && thread->is_Java_thread()) {
 435       _jt = (JavaThread*)thread;
 436       _original_state = _jt->thread_state();
 437       _jt->set_thread_state(_thread_in_vm);
 438     }
 439   }
 440 
 441   ~JavaThreadInVM() {
 442     if (_jt != NULL) {
 443       _jt->set_thread_state(_original_state);
 444     }
 445   }
 446 
 447 };
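
JavaThreadInVM is the classic save-and-restore guard: the constructor records the current thread state and installs _thread_in_vm, and the destructor puts the original state back on every exit path, including the early return in on_vm_shutdown below. The generic shape of the pattern, as a self-contained sketch:

    #include <utility>

    // Save a value on scope entry, restore it on scope exit.
    // JavaThreadInVM is this idea specialized to JavaThreadState.
    template <typename T>
    class ScopedValue {
      T& _slot;
      T  _saved;
     public:
      ScopedValue(T& slot, T value) : _slot(slot), _saved(slot) {
        _slot = std::move(value);
      }
      ~ScopedValue() { _slot = std::move(_saved); }
      ScopedValue(const ScopedValue&) = delete;
      ScopedValue& operator=(const ScopedValue&) = delete;
    };
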
 448 
 449 void JfrEmergencyDump::on_vm_shutdown(bool exception_handler) {
 450   if (!guard_reentrancy()) {
 451     return;
 452   }
 453 
 454   Thread* thread = Thread::current_or_null_safe();
 455 
 456   // Ensure a JavaThread is _thread_in_vm for the duration of the dump
 457   JavaThreadInVM jtivm(thread);
 458   if ((thread != NULL) && !prepare_for_emergency_dump()) {
 459     return;
 460   }
 461 
 462   EventDumpReason event;
 463   if (event.should_commit()) {
 464     event.set_reason(exception_handler ? "Crash" : "Out of Memory");
 465     event.set_recordingId(-1);
 466     event.commit();
 467   }
 468   if (!exception_handler) {
 469     // OOM
 470     LeakProfiler::emit_events(max_jlong, false);
 471   }
 472   const int messages = MSGBIT(MSG_VM_ERROR);
 473   JfrRecorderService service;
 474   service.rotate(messages);
 475 }
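
MSGBIT(MSG_VM_ERROR) selects a single message bit for the recorder service, so rotation is driven by a plain bitmask. A sketch assuming the usual HotSpot definition MSGBIT(e) == (1 << (e)); the enum values below are illustrative, the real ones live in jfrPostBox.hpp:

    #include <cassert>

    // Illustrative message ids, not the actual HotSpot values.
    enum JfrMsg { MSG_ROTATE = 3, MSG_VM_ERROR = 8 };

    constexpr int msgbit(int e) { return 1 << e; }

    int main() {
      const int messages = msgbit(MSG_VM_ERROR);
      assert((messages & msgbit(MSG_VM_ERROR)) != 0);  // this message is set
      assert((messages & msgbit(MSG_ROTATE)) == 0);    // others are not
      return 0;
    }
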