src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp

 315   return emergency_dump_path != NULL ? open_exclusivly(emergency_dump_path) : invalid_fd;
 316 }
 317 
 318 const char* JfrEmergencyDump::build_dump_path(const char* repository_path) {
 319   return repository_path == NULL ? create_emergency_dump_path() : create_emergency_chunk_path(repository_path);
 320 }
 321 
 322 void JfrEmergencyDump::on_vm_error(const char* repository_path) {
 323   assert(repository_path != NULL, "invariant");
 324   ResourceMark rm;
 325   const fio_fd emergency_fd = emergency_dump_file_descriptor();
 326   if (emergency_fd != invalid_fd) {
 327     RepositoryIterator iterator(repository_path, strlen(repository_path));
 328     write_emergency_file(emergency_fd, iterator);
 329     os::close(emergency_fd);
 330   }
 331 }
 332 
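
The fallback logic above is simple: with no repository path, a default emergency dump location is used; otherwise a new chunk path is created inside the repository, and on_vm_error() then copies every chunk the RepositoryIterator yields into the single emergency file. The helper open_exclusivly() is presumably defined earlier in this file (not shown here). As a sketch only, assuming the name means "fail rather than reuse an existing file", an exclusive create on POSIX could look like the following; the real helper goes through HotSpot's os:: layer and may differ:

#include <fcntl.h>     /* ::open, O_* flags */
#include <sys/stat.h>  /* S_IRUSR, S_IWUSR  */

typedef int fio_fd;
static const fio_fd invalid_fd = -1;

// Hypothetical sketch, not the actual HotSpot implementation.
static fio_fd open_exclusively_sketch(const char* path) {
  if (path == NULL) {
    return invalid_fd;
  }
  // O_CREAT | O_EXCL makes creation atomic: if the file already
  // exists the call fails with EEXIST instead of truncating it.
  return ::open(path, O_CREAT | O_EXCL | O_WRONLY, S_IRUSR | S_IWUSR);
}
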
 333 /*
 334 * We are just about to exit the VM, so we will be very aggressive
 335 * at this point in order to increase overall success of dumping jfr data:
 336 *
 337 * 1. if the thread state is not "_thread_in_vm", we will quickly transition
 338 *    it to "_thread_in_vm".
 339 * 2. if the thread is the owner of some critical lock(s), unlock them.
 340 *
 341 * If we end up deadlocking while attempting to dump jfr data,
 342 * we rely on the WatcherThread task "is_error_reported()"
 343 * to exit the VM after a hard-coded timeout (which is why the WatcherThread is disallowed from emergency dumping).
 344 * This "safety net" somewhat explains the aggressiveness of this attempt.
 345 *
 346 */
 347 static bool prepare_for_emergency_dump() {
 348   if (JfrStream_lock->owned_by_self()) {
 349     // crashed during jfr rotation, disallow recursion
 350     return false;
 351   }
 352   Thread* const thread = Thread::current();
 353   if (thread->is_Watcher_thread()) {
 354     // need WatcherThread as a safeguard against potential deadlocks
 355     return false;
 356   }
 357 
 358   if (thread->is_Java_thread()) {
 359     ((JavaThread*)thread)->set_thread_state(_thread_in_vm);
 360   }
 361 
 362 #ifdef ASSERT
 363   Mutex* owned_lock = thread->owned_locks();
 364   while (owned_lock != NULL) {
 365     Mutex* next = owned_lock->next();
 366     owned_lock->unlock();
 367     owned_lock = next;
 368   }
 369 #endif // ASSERT
 370 
 371   if (Threads_lock->owned_by_self()) {
 372     Threads_lock->unlock();
 373   }
 374 
 375   if (Module_lock->owned_by_self()) {
 376     Module_lock->unlock();
 377   }
 378 
 379   if (ClassLoaderDataGraph_lock->owned_by_self()) {
 380     ClassLoaderDataGraph_lock->unlock();
 381   }


 411   if (JfrMsg_lock->owned_by_self()) {
 412     JfrMsg_lock->unlock();
 413   }
 414 
 415   if (JfrBuffer_lock->owned_by_self()) {
 416     JfrBuffer_lock->unlock();
 417   }
 418 
 419   if (JfrStacktrace_lock->owned_by_self()) {
 420     JfrStacktrace_lock->unlock();
 421   }
 422   return true;
 423 }
 424 
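
A note on the #ifdef ASSERT block above: Thread::owned_locks() is the head of a per-thread chain that Mutex maintains only in debug builds, which is why product builds must instead test each known lock explicitly, as the long run of owned_by_self() checks does. The walk also has to read the next link before unlocking, since unlocking unhooks the node from the chain being walked. A standalone sketch of that shape, where MutexLike is a hypothetical stand-in rather than HotSpot's Mutex:

#include <cstddef>

// Hypothetical stand-in for a debug-build Mutex that chains itself
// onto a per-thread "owned locks" list.
struct MutexLike {
  MutexLike* _next;
  MutexLike* next() const { return _next; }
  void unlock() { /* release; also unhooks this node from the chain */ }
};

// Same shape as the ASSERT-only sweep above: capture the successor
// before unlocking, because unlocking edits the chain we are walking.
static void unlock_all(MutexLike* head) {
  while (head != NULL) {
    MutexLike* next = head->next();
    head->unlock();
    head = next;
  }
}
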
 425 static volatile int jfr_shutdown_lock = 0;
 426 
 427 static bool guard_reentrancy() {
 428   return Atomic::cmpxchg(1, &jfr_shutdown_lock, 0) == 0;
 429 }
 430 
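
guard_reentrancy() is a one-shot latch: Atomic::cmpxchg(1, &jfr_shutdown_lock, 0) returns the previous value, so exactly one caller observes 0, flips the word to 1, and proceeds; any later or concurrent caller sees 1 and backs off, which keeps a recursive crash inside the dump path from re-entering it. The same idiom in self-contained, portable C++, with std::atomic standing in for HotSpot's Atomic:: wrappers:

#include <atomic>

static std::atomic<int> shutdown_latch(0);

// Returns true for exactly one caller, ever; everyone else backs off.
static bool try_enter_once() {
  int expected = 0;
  // If the latch still holds 0, store 1 and return true;
  // otherwise leave it alone and return false.
  return shutdown_latch.compare_exchange_strong(expected, 1);
}
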
 431 void JfrEmergencyDump::on_vm_shutdown(bool exception_handler) {
 432   if (!(guard_reentrancy() && prepare_for_emergency_dump())) {
 433     return;
 434   }
 435   EventDumpReason event;
 436   if (event.should_commit()) {
 437     event.set_reason(exception_handler ? "Crash" : "Out of Memory");
 438     event.set_recordingId(-1);
 439     event.commit();
 440   }
 441   if (!exception_handler) {
 442     // OOM
 443     LeakProfiler::emit_events(max_jlong, false);
 444   }
 445   const int messages = MSGBIT(MSG_VM_ERROR);
 446   JfrRecorderService service;
 447   service.rotate(messages);
 448 }
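
The rotation request is expressed as a message bitmask: in the JDK sources MSGBIT(e) expands to (1 << (e)), so a single int can carry a set of messages through JFR's post box, of which MSG_VM_ERROR is one. A minimal sketch of the idiom, with hypothetical message ids:

// Illustrative only: one bit per message kind. JFR's post box defines
// the real MSGBIT this way; the enum values here are hypothetical.
#define MSGBIT(e) (1 << (e))

enum SketchMsg {
  SKETCH_MSG_ROTATE   = 0,
  SKETCH_MSG_VM_ERROR = 1
};

static bool has_message(int mask, SketchMsg m) {
  return (mask & MSGBIT(m)) != 0;
}

// Usage: int messages = MSGBIT(SKETCH_MSG_VM_ERROR);
//        has_message(messages, SKETCH_MSG_VM_ERROR);  // true

The listing below repeats the same region of the file in its revised form: the thread-state transition moves out of prepare_for_emergency_dump() and into a JavaThreadInVM guard that also restores the original state.
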


 315   return emergency_dump_path != NULL ? open_exclusivly(emergency_dump_path) : invalid_fd;
 316 }
 317 
 318 const char* JfrEmergencyDump::build_dump_path(const char* repository_path) {
 319   return repository_path == NULL ? create_emergency_dump_path() : create_emergency_chunk_path(repository_path);
 320 }
 321 
 322 void JfrEmergencyDump::on_vm_error(const char* repository_path) {
 323   assert(repository_path != NULL, "invariant");
 324   ResourceMark rm;
 325   const fio_fd emergency_fd = emergency_dump_file_descriptor();
 326   if (emergency_fd != invalid_fd) {
 327     RepositoryIterator iterator(repository_path, strlen(repository_path));
 328     write_emergency_file(emergency_fd, iterator);
 329     os::close(emergency_fd);
 330   }
 331 }
 332 
 333 /*
 334 * We are just about to exit the VM, so we will be very aggressive
 335 * at this point in order to increase overall success of dumping jfr data.
 336 *
 337 * If we end up deadlocking while attempting to dump jfr data,
 338 * we rely on the WatcherThread task "is_error_reported()"
 339 * to exit the VM after a hard-coded timeout (which is why the WatcherThread is disallowed from emergency dumping).
 340 * This "safety net" somewhat explains the aggressiveness of this attempt.
 341 *
 342 */
 343 static bool prepare_for_emergency_dump() {
 344   if (JfrStream_lock->owned_by_self()) {
 345     // crashed during jfr rotation, disallow recursion
 346     return false;
 347   }
 348   Thread* const thread = Thread::current();
 349   if (thread->is_Watcher_thread()) {
 350     // need WatcherThread as a safeguard against potential deadlocks
 351     return false;
 352   }
 353 
 354 #ifdef ASSERT
 355   Mutex* owned_lock = thread->owned_locks();
 356   while (owned_lock != NULL) {
 357     Mutex* next = owned_lock->next();
 358     owned_lock->unlock();
 359     owned_lock = next;
 360   }
 361 #endif // ASSERT
 362 
 363   if (Threads_lock->owned_by_self()) {
 364     Threads_lock->unlock();
 365   }
 366 
 367   if (Module_lock->owned_by_self()) {
 368     Module_lock->unlock();
 369   }
 370 
 371   if (ClassLoaderDataGraph_lock->owned_by_self()) {
 372     ClassLoaderDataGraph_lock->unlock();
 373   }


 403   if (JfrMsg_lock->owned_by_self()) {
 404     JfrMsg_lock->unlock();
 405   }
 406 
 407   if (JfrBuffer_lock->owned_by_self()) {
 408     JfrBuffer_lock->unlock();
 409   }
 410 
 411   if (JfrStacktrace_lock->owned_by_self()) {
 412     JfrStacktrace_lock->unlock();
 413   }
 414   return true;
 415 }
 416 
 417 static volatile int jfr_shutdown_lock = 0;
 418 
 419 static bool guard_reentrancy() {
 420   return Atomic::cmpxchg(1, &jfr_shutdown_lock, 0) == 0;
 421 }
 422 
 423 class JavaThreadInVM : public StackObj {
 424  private:
 425   JavaThread* _jt;
 426   JavaThreadState _original_state;
 427 
 428  public:
 429 
 430   JavaThreadInVM(Thread* thread) : _jt(NULL),
 431                                    _original_state(_thread_max_state) {
 432     if ((thread != NULL) && thread->is_Java_thread()) {
 433       _jt = (JavaThread*)thread;
 434       _original_state = _jt->thread_state();
 435       _jt->set_thread_state(_thread_in_vm);
 436     }
 437   }
 438 
 439   ~JavaThreadInVM() {
 440     if (_jt != NULL) {
 441       _jt->set_thread_state(_original_state);
 442     }
 443   }
 444 
 445 };
 446 
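
JavaThreadInVM is a stack-only (StackObj) guard: the constructor records the thread's current state before forcing _thread_in_vm, and the destructor restores it on every exit path, including the early return taken when prepare_for_emergency_dump() fails below. Reduced to a self-contained sketch, where ThreadLike and the state values are illustrative stand-ins rather than HotSpot types:

// Minimal save-and-restore guard with the same shape as JavaThreadInVM.
struct ThreadLike {
  int _state;
};

class ScopedForcedState {
 private:
  ThreadLike* _t;
  int _saved;
 public:
  ScopedForcedState(ThreadLike* t, int forced) : _t(t), _saved(0) {
    if (_t != NULL) {
      _saved = _t->_state;   // remember the original state
      _t->_state = forced;   // force the state for this scope
    }
  }
  ~ScopedForcedState() {
    if (_t != NULL) {
      _t->_state = _saved;   // restore on every exit path
    }
  }
};

Tolerating a NULL thread matters here because on_vm_shutdown() can run very early or very late in the VM's life, when Thread::current_or_null_safe() may legitimately return NULL.
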
 447 void JfrEmergencyDump::on_vm_shutdown(bool exception_handler) {
 448   if (!guard_reentrancy()) {
 449     return;
 450   }
 451 
 452   // Ensure a JavaThread is _thread_in_vm when we make this call
 453   JavaThreadInVM jtivm(Thread::current_or_null_safe());
 454 
 455   if (!prepare_for_emergency_dump()) {
 456     return;
 457   }
 458   EventDumpReason event;
 459   if (event.should_commit()) {
 460     event.set_reason(exception_handler ? "Crash" : "Out of Memory");
 461     event.set_recordingId(-1);
 462     event.commit();
 463   }
 464   if (!exception_handler) {
 465     // OOM
 466     LeakProfiler::emit_events(max_jlong, false);
 467   }
 468   const int messages = MSGBIT(MSG_VM_ERROR);
 469   JfrRecorderService service;
 470   service.rotate(messages);
 471 }
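
For orientation, the two values of exception_handler map to the two dump reasons recorded above. The sketch below shows hypothetical call sites only; the real wiring lives in the VM's error-reporting code, not in this file:

// Hypothetical call sites, to show how the flag is meant to be read.
void on_fatal_error_reported() {
  JfrEmergencyDump::on_vm_shutdown(true);   // "Crash": skip leak events
}

void on_out_of_memory_exit() {
  JfrEmergencyDump::on_vm_shutdown(false);  // "Out of Memory": also emits
                                            // LeakProfiler events first
}
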