
src/share/vm/runtime/safepoint.cpp

rev 8910 : full patch for jfr
   1 /*
   2  * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


  80 #ifdef COMPILER1
  81 #include "c1/c1_globals.hpp"
  82 #endif
  83 
  84 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
  85 
  86 // --------------------------------------------------------------------------------------------------
  87 // Implementation of Safepoint begin/end
  88 
  89 SafepointSynchronize::SynchronizeState volatile SafepointSynchronize::_state = SafepointSynchronize::_not_synchronized;
  90 volatile int  SafepointSynchronize::_waiting_to_block = 0;
  91 volatile int SafepointSynchronize::_safepoint_counter = 0;
  92 int SafepointSynchronize::_current_jni_active_count = 0;
  93 long  SafepointSynchronize::_end_of_last_safepoint = 0;
  94 static volatile int PageArmed = 0 ;        // safepoint polling page is RO|RW vs PROT_NONE
  95 static volatile int TryingToBlock = 0 ;    // proximate value -- for advisory use only
  96 static bool timeout_error_printed = false;
  97 
  98 // Roll all threads forward to a safepoint and suspend them all
  99 void SafepointSynchronize::begin() {

 100 
 101   Thread* myThread = Thread::current();
 102   assert(myThread->is_VM_thread(), "Only VM thread may execute a safepoint");
 103 
 104   if (PrintSafepointStatistics || PrintSafepointStatisticsTimeout > 0) {
 105     _safepoint_begin_time = os::javaTimeNanos();
 106     _ts_of_current_safepoint = tty->time_stamp().seconds();
 107   }
 108 
 109 #if INCLUDE_ALL_GCS
 110   if (UseConcMarkSweepGC) {
 111     // In the future we should investigate whether CMS can use the
 112     // more-general mechanism below.  DLD (01/05).
 113     ConcurrentMarkSweepThread::synchronize(false);
 114   } else if (UseG1GC) {
 115     SuspendibleThreadSet::synchronize();
 116   }
 117 #endif // INCLUDE_ALL_GCS
 118 
 119   // By getting the Threads_lock, we assure that no threads are about to start or


 172   //     (on MP systems).  In order to avoid the overhead of issuing
 173   //     a memory barrier for each Java thread making native calls, each Java
 174   //     thread performs a write to a single memory page after changing
 175   //     the thread state.  The VM thread performs a sequence of
 176   //     mprotect OS calls which forces all previous writes from all
 177   //     Java threads to be serialized.  This is done in the
 178   //     os::serialize_thread_states() call.  This has proven to be
 179   //     much more efficient than executing a membar instruction
 180   //     on every call to native code.
 181   //  3. Running compiled Code
 182   //     Compiled code reads a global (Safepoint Polling) page that
 183   //     is set to fault if we are trying to get to a safepoint.
 184   //  4. Blocked
 185   //     A thread which is blocked will not be allowed to return from the
 186   //     block condition until the safepoint operation is complete.
 187   //  5. In VM or Transitioning between states
 188   //     If a Java thread is currently running in the VM or transitioning
 189   //     between states, the safepointing code will wait for the thread to
 190   //     block itself when it attempts transitions to a new state.
 191   //


 192   _state            = _synchronizing;
 193   OrderAccess::fence();
 194 
 195   // Flush all thread states to memory
 196   if (!UseMembar) {
 197     os::serialize_thread_states();
 198   }
 199 
 200   // Make interpreter safepoint aware
 201   Interpreter::notice_safepoints();
 202 
 203   if (UseCompilerSafepoints && DeferPollingPageLoopCount < 0) {
 204     // Make polling safepoint aware
 205     guarantee (PageArmed == 0, "invariant") ;
 206     PageArmed = 1 ;
 207     os::make_polling_page_unreadable();
 208   }
 209 
 210   // Consider using active_processor_count() ... but that call is expensive.
 211   int ncpus = os::processor_count() ;


 226   int steps = 0 ;
 227   while(still_running > 0) {
 228     for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
 229       assert(!cur->is_ConcurrentGC_thread(), "A concurrent GC thread is unexpectedly being suspended");
 230       ThreadSafepointState *cur_state = cur->safepoint_state();
 231       if (cur_state->is_running()) {
 232         cur_state->examine_state_of_thread();
 233         if (!cur_state->is_running()) {
 234            still_running--;
 235            // consider adjusting steps downward:
 236            //   steps = 0
 237            //   steps -= NNN
 238            //   steps >>= 1
 239            //   steps = MIN(steps, 2000-100)
 240            //   if (iterations != 0) steps -= NNN
 241         }
 242         if (TraceSafepoint && Verbose) cur_state->print();
 243       }
 244     }
 245 
 246     if (PrintSafepointStatistics && iterations == 0) {


 247       begin_statistics(nof_threads, still_running);
 248     }

 249 
 250     if (still_running > 0) {
 251       // Check if it is taking too long
 252       if (SafepointTimeout && safepoint_limit_time < os::javaTimeNanos()) {
 253         print_safepoint_timeout(_spinning_timeout);
 254       }
 255 
 256       // Spin to avoid context switching.
 257       // There's a tension between allowing the mutators to run (and rendezvous)
 258       // vs spinning.  As the VM thread spins, wasting cycles, it consumes CPU that
 259       // a mutator might otherwise use profitably to reach a safepoint.  Excessive
 260       // spinning by the VM thread on a saturated system can increase rendezvous latency.
 261       // Blocking or yielding incur their own penalties in the form of context switching
 262       // and the resultant loss of $ residency.
 263       //
 264       // Further complicating matters is that yield() does not work as naively expected
 265       // on many platforms -- yield() does not guarantee that any other ready threads
 266       // will run.   As such we revert to yield_all() after some number of iterations.
 267       // Yield_all() is implemented as a short unconditional sleep on some platforms.
 268       // Typical operating systems round a "short" sleep period up to 10 msecs, so sleeping


 319         SpinPause() ;     // MP-Polite spin
 320       } else
 321       if (steps < DeferThrSuspendLoopCount) {
 322         os::NakedYield() ;
 323       } else {
 324         os::yield_all(steps) ;
 325         // Alternately, the VM thread could transiently depress its scheduling priority or
 326         // transiently increase the priority of the tardy mutator(s).
 327       }
 328 
 329       iterations ++ ;
 330     }
 331     assert(iterations < (uint)max_jint, "We have been iterating in the safepoint loop too long");
 332   }
 333   assert(still_running == 0, "sanity check");
 334 
 335   if (PrintSafepointStatistics) {
 336     update_statistics_on_spin_end();
 337   }
 338 











 339   // wait until all threads are stopped
 340   while (_waiting_to_block > 0) {
 341     if (TraceSafepoint) tty->print_cr("Waiting for %d thread(s) to block", _waiting_to_block);
 342     if (!SafepointTimeout || timeout_error_printed) {
 343       Safepoint_lock->wait(true);  // true, means with no safepoint checks
 344     } else {
 345       // Compute remaining time
 346       jlong remaining_time = safepoint_limit_time - os::javaTimeNanos();
 347 
 348       // If there is no remaining time, then there is an error
 349       if (remaining_time < 0 || Safepoint_lock->wait(true, remaining_time / MICROUNITS)) {
 350         print_safepoint_timeout(_blocking_timeout);
 351       }
 352     }
 353   }
 354   assert(_waiting_to_block == 0, "sanity check");
 355 
 356 #ifndef PRODUCT
 357   if (SafepointTimeout) {
 358     jlong current_time = os::javaTimeNanos();
 359     if (safepoint_limit_time < current_time) {
 360       tty->print_cr("# SafepointSynchronize: Finished after "
 361                     INT64_FORMAT_W(6) " ms",
 362                     ((current_time - safepoint_limit_time) / MICROUNITS +
 363                      SafepointTimeoutDelay));
 364     }
 365   }
 366 #endif
 367 
 368   assert((_safepoint_counter & 0x1) == 0, "must be even");
 369   assert(Threads_lock->owned_by_self(), "must hold Threads_lock");
 370   _safepoint_counter ++;
 371 
 372   // Record state
 373   _state = _synchronized;
 374 
 375   OrderAccess::fence();
 376 






 377 #ifdef ASSERT
 378   for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
 379     // make sure all the threads were visited
 380     assert(cur->was_visited_for_critical_count(), "missed a thread");
 381   }
 382 #endif // ASSERT
 383 
 384   // Update the count of active JNI critical regions
 385   GC_locker::set_jni_lock_count(_current_jni_active_count);
 386 
 387   if (TraceSafepoint) {
 388     VM_Operation *op = VMThread::vm_operation();
 389     tty->print_cr("Entering safepoint region: %s", (op != NULL) ? op->name() : "no vm operation");
 390   }
 391 
 392   RuntimeService::record_safepoint_synchronized();
 393   if (PrintSafepointStatistics) {
 394     update_statistics_on_sync_end(os::javaTimeNanos());
 395   }
 396 
 397   // Call stuff that needs to be run when a safepoint is just about to be completed

 398   do_cleanup_tasks();
 399 





 400   if (PrintSafepointStatistics) {
 401     // Record how much time was spent on the above cleanup tasks
 402     update_statistics_on_cleanup_end(os::javaTimeNanos());
 403   }







 404 }
 405 
 406 // Wake up all threads, so they are ready to resume execution after the safepoint
 407 // operation has been carried out
 408 void SafepointSynchronize::end() {


 409 
 410   assert(Threads_lock->owned_by_self(), "must hold Threads_lock");
 411   assert((_safepoint_counter & 0x1) == 1, "must be odd");
 412   _safepoint_counter ++;
 413   // memory fence isn't required here since an odd _safepoint_counter
 414   // value can do no harm and a fence is issued below anyway.
 415 
 416   DEBUG_ONLY(Thread* myThread = Thread::current();)
 417   assert(myThread->is_VM_thread(), "Only VM thread can execute a safepoint");
 418 
 419   if (PrintSafepointStatistics) {
 420     end_statistics(os::javaTimeNanos());
 421   }
 422 
 423 #ifdef ASSERT
 424   // A pending_exception cannot be installed during a safepoint.  The threads
 425   // may install an async exception into pending_exception after they come back
 426   // from a safepoint and unblock.  But that should happen later.
 427   for(JavaThread *cur = Threads::first(); cur; cur = cur->next()) {
 428     assert (!(cur->has_pending_exception() &&


 477     }
 478 
 479     RuntimeService::record_safepoint_end();
 480 
 481     // Release the Threads_lock, so threads can be created/destroyed again. It will also start all threads
 482     // blocked in signal_thread_blocked
 483     Threads_lock->unlock();
 484 
 485   }
 486 #if INCLUDE_ALL_GCS
 487   // If there are any concurrent GC threads resume them.
 488   if (UseConcMarkSweepGC) {
 489     ConcurrentMarkSweepThread::desynchronize(false);
 490   } else if (UseG1GC) {
 491     SuspendibleThreadSet::desynchronize();
 492   }
 493 #endif // INCLUDE_ALL_GCS
 494   // record this time so VMThread can keep track of how much time has elapsed
 495   // since the last safepoint.
 496   _end_of_last_safepoint = os::javaTimeMillis();





 497 }
 498 
 499 bool SafepointSynchronize::is_cleanup_needed() {
 500   // Need a safepoint if the inline cache buffer is non-empty
 501   if (!InlineCacheBuffer::is_empty()) return true;
 502   return false;
 503 }
 504 
 505 






 506 
 507 // Various cleaning tasks that should be done periodically at safepoints
 508 void SafepointSynchronize::do_cleanup_tasks() {
 509   {
 510     TraceTime t1("deflating idle monitors", TraceSafepointCleanupTime);


 511     ObjectSynchronizer::deflate_idle_monitors();

 512   }
 513 
 514   {
 515     TraceTime t2("updating inline caches", TraceSafepointCleanupTime);


 516     InlineCacheBuffer::update_inline_caches();

 517   }
 518   {
 519     TraceTime t3("compilation policy safepoint handler", TraceSafepointCleanupTime);


 520     CompilationPolicy::policy()->do_safepoint_work();

 521   }
 522 
 523   {
 524     TraceTime t4("mark nmethods", TraceSafepointCleanupTime);


 525     NMethodSweeper::mark_active_nmethods();

 526   }
 527 
 528   if (SymbolTable::needs_rehashing()) {
 529     TraceTime t5("rehashing symbol table", TraceSafepointCleanupTime);


 530     SymbolTable::rehash_table();

 531   }
 532 
 533   if (StringTable::needs_rehashing()) {
 534     TraceTime t6("rehashing string table", TraceSafepointCleanupTime);


 535     StringTable::rehash_table();

 536   }
 537 
 538   // rotate log files?
 539   if (UseGCLogFileRotation) {


 540     gclog_or_tty->rotate_log(false);

 541   }
 542 
 543   {
 544     // CMS delays purging the CLDG until the beginning of the next safepoint to
 545     // make sure the concurrent sweep is done
 546     TraceTime t7("purging class loader data graph", TraceSafepointCleanupTime);


 547     ClassLoaderDataGraph::purge_if_needed();

 548   }
 549 }
 550 
 551 
 552 bool SafepointSynchronize::safepoint_safe(JavaThread *thread, JavaThreadState state) {
 553   switch(state) {
 554   case _thread_in_native:
 555     // native threads are safe if they have no Java stack or have a walkable stack
 556     return !thread->has_last_Java_frame() || thread->frame_anchor()->walkable();
 557 
 558    // blocked threads should already have a walkable stack
 559   case _thread_blocked:
 560     assert(!thread->has_last_Java_frame() || thread->frame_anchor()->walkable(), "blocked and not walkable");
 561     return true;
 562 
 563   default:
 564     return false;
 565   }
 566 }
 567 
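Note on the counter protocol visible above: _safepoint_counter stays even while no safepoint is in progress and becomes odd for the duration of one (hence the "must be even" and "must be odd" asserts in begin() and end()), and the patched version below reuses that odd value as the safepoint id for the new JFR events. The following is a minimal standalone sketch of that even/odd protocol, written in C++11 with invented names; it is illustrative only, not HotSpot code.

#include <atomic>

// Even value: no safepoint in progress.  Odd value: a safepoint is in progress.
static std::atomic<unsigned> g_safepoint_counter(0);

// Coordinator side, corresponding to the increment in begin() once all
// threads have stopped.
void sketch_safepoint_begin() {
  g_safepoint_counter.fetch_add(1, std::memory_order_acq_rel);   // even -> odd
}

// Coordinator side, corresponding to the increment in end().
void sketch_safepoint_end() {
  g_safepoint_counter.fetch_add(1, std::memory_order_acq_rel);   // odd -> even
}

// Reader side: sample the counter before and after some work to detect,
// seqlock-style, whether a safepoint overlapped it.
bool sketch_overlapped_a_safepoint(unsigned before, unsigned after) {
  return (before & 1u) != 0 || before != after;
}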

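The rendezvous loop shown above backs off in stages: SpinPause() first, os::NakedYield() while steps is below DeferThrSuspendLoopCount, and os::yield_all() after that (a short unconditional sleep on some platforms). A rough, self-contained sketch of that spin/yield/sleep shape, with made-up thresholds and no HotSpot dependencies:

#include <chrono>
#include <functional>
#include <thread>

// Illustrative backoff sketch only; the real loop is tuned by HotSpot flags
// such as DeferThrSuspendLoopCount and uses platform-specific primitives.
void sketch_wait_for_rendezvous(const std::function<bool()>& all_threads_stopped) {
  const int kSpinLimit  = 100;    // stage 1: polite busy-wait
  const int kYieldLimit = 1000;   // stage 2: voluntarily give up the CPU
  int steps = 0;
  while (!all_threads_stopped()) {
    if (steps < kSpinLimit) {
      // busy-wait; a CPU pause hint (e.g. x86 PAUSE) would go here
    } else if (steps < kYieldLimit) {
      std::this_thread::yield();  // may not run any other ready thread
    } else {
      // yield() gives no guarantee, so fall back to a short sleep, which the
      // OS will typically round up to its scheduling granularity.
      std::this_thread::sleep_for(std::chrono::milliseconds(1));
    }
    ++steps;
  }
}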

   1 /*
   2  * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


  80 #ifdef COMPILER1
  81 #include "c1/c1_globals.hpp"
  82 #endif
  83 
  84 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
  85 
  86 // --------------------------------------------------------------------------------------------------
  87 // Implementation of Safepoint begin/end
  88 
  89 SafepointSynchronize::SynchronizeState volatile SafepointSynchronize::_state = SafepointSynchronize::_not_synchronized;
  90 volatile int  SafepointSynchronize::_waiting_to_block = 0;
  91 volatile int SafepointSynchronize::_safepoint_counter = 0;
  92 int SafepointSynchronize::_current_jni_active_count = 0;
  93 long  SafepointSynchronize::_end_of_last_safepoint = 0;
  94 static volatile int PageArmed = 0 ;        // safepoint polling page is RO|RW vs PROT_NONE
  95 static volatile int TryingToBlock = 0 ;    // proximate value -- for advisory use only
  96 static bool timeout_error_printed = false;
  97 
  98 // Roll all threads forward to a safepoint and suspend them all
  99 void SafepointSynchronize::begin() {
 100   EventSafepointBegin begin_event;
 101 
 102   Thread* myThread = Thread::current();
 103   assert(myThread->is_VM_thread(), "Only VM thread may execute a safepoint");
 104 
 105   if (PrintSafepointStatistics || PrintSafepointStatisticsTimeout > 0) {
 106     _safepoint_begin_time = os::javaTimeNanos();
 107     _ts_of_current_safepoint = tty->time_stamp().seconds();
 108   }
 109 
 110 #if INCLUDE_ALL_GCS
 111   if (UseConcMarkSweepGC) {
 112     // In the future we should investigate whether CMS can use the
 113     // more-general mechanism below.  DLD (01/05).
 114     ConcurrentMarkSweepThread::synchronize(false);
 115   } else if (UseG1GC) {
 116     SuspendibleThreadSet::synchronize();
 117   }
 118 #endif // INCLUDE_ALL_GCS
 119 
 120   // By getting the Threads_lock, we assure that no threads are about to start or


 173   //     (on MP systems).  In order to avoid the overhead of issuing
 174   //     a memory barrier for each Java thread making native calls, each Java
 175   //     thread performs a write to a single memory page after changing
 176   //     the thread state.  The VM thread performs a sequence of
 177   //     mprotect OS calls which forces all previous writes from all
 178   //     Java threads to be serialized.  This is done in the
 179   //     os::serialize_thread_states() call.  This has proven to be
 180   //     much more efficient than executing a membar instruction
 181   //     on every call to native code.
 182   //  3. Running compiled Code
 183   //     Compiled code reads a global (Safepoint Polling) page that
 184   //     is set to fault if we are trying to get to a safepoint.
 185   //  4. Blocked
 186   //     A thread which is blocked will not be allowed to return from the
 187   //     block condition until the safepoint operation is complete.
 188   //  5. In VM or Transitioning between states
 189   //     If a Java thread is currently running in the VM or transitioning
 190   //     between states, the safepointing code will wait for the thread to
 191   //     block itself when it attempts transitions to a new state.
 192   //
 193   EventSafepointStateSynchronization sync_event;
 194   int initial_running = 0;
 195   _state            = _synchronizing;
 196   OrderAccess::fence();
 197 
 198   // Flush all thread states to memory
 199   if (!UseMembar) {
 200     os::serialize_thread_states();
 201   }
 202 
 203   // Make interpreter safepoint aware
 204   Interpreter::notice_safepoints();
 205 
 206   if (UseCompilerSafepoints && DeferPollingPageLoopCount < 0) {
 207     // Make polling safepoint aware
 208     guarantee (PageArmed == 0, "invariant") ;
 209     PageArmed = 1 ;
 210     os::make_polling_page_unreadable();
 211   }
 212 
 213   // Consider using active_processor_count() ... but that call is expensive.
 214   int ncpus = os::processor_count() ;


 229   int steps = 0 ;
 230   while(still_running > 0) {
 231     for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
 232       assert(!cur->is_ConcurrentGC_thread(), "A concurrent GC thread is unexpectedly being suspended");
 233       ThreadSafepointState *cur_state = cur->safepoint_state();
 234       if (cur_state->is_running()) {
 235         cur_state->examine_state_of_thread();
 236         if (!cur_state->is_running()) {
 237            still_running--;
 238            // consider adjusting steps downward:
 239            //   steps = 0
 240            //   steps -= NNN
 241            //   steps >>= 1
 242            //   steps = MIN(steps, 2000-100)
 243            //   if (iterations != 0) steps -= NNN
 244         }
 245         if (TraceSafepoint && Verbose) cur_state->print();
 246       }
 247     }
 248 
 249     if (iterations == 0) {
 250       initial_running = still_running;
 251       if (PrintSafepointStatistics) {
 252         begin_statistics(nof_threads, still_running);
 253       }
 254     }
 255 
 256     if (still_running > 0) {
 257       // Check if it is taking too long
 258       if (SafepointTimeout && safepoint_limit_time < os::javaTimeNanos()) {
 259         print_safepoint_timeout(_spinning_timeout);
 260       }
 261 
 262       // Spin to avoid context switching.
 263       // There's a tension between allowing the mutators to run (and rendezvous)
 264       // vs spinning.  As the VM thread spins, wasting cycles, it consumes CPU that
 265       // a mutator might otherwise use profitably to reach a safepoint.  Excessive
 266       // spinning by the VM thread on a saturated system can increase rendezvous latency.
 267       // Blocking or yielding incur their own penalties in the form of context switching
 268       // and the resultant loss of $ residency.
 269       //
 270       // Further complicating matters is that yield() does not work as naively expected
 271       // on many platforms -- yield() does not guarantee that any other ready threads
 272       // will run.   As such we revert to yield_all() after some number of iterations.
 273       // Yield_all() is implemented as a short unconditional sleep on some platforms.
 274       // Typical operating systems round a "short" sleep period up to 10 msecs, so sleeping


 325         SpinPause() ;     // MP-Polite spin
 326       } else
 327       if (steps < DeferThrSuspendLoopCount) {
 328         os::NakedYield() ;
 329       } else {
 330         os::yield_all(steps) ;
 331         // Alternately, the VM thread could transiently depress its scheduling priority or
 332         // transiently increase the priority of the tardy mutator(s).
 333       }
 334 
 335       iterations ++ ;
 336     }
 337     assert(iterations < (uint)max_jint, "We have been iterating in the safepoint loop too long");
 338   }
 339   assert(still_running == 0, "sanity check");
 340 
 341   if (PrintSafepointStatistics) {
 342     update_statistics_on_spin_end();
 343   }
 344 
 345   if (sync_event.should_commit()) {
 346     // Group this event together with the ones committed after the counter is increased
 347     sync_event.set_safepointId(safepoint_counter() + 1);
 348     sync_event.set_initialThreadCount(initial_running);
 349     sync_event.set_runningThreadCount(_waiting_to_block);
 350     sync_event.set_iterations(iterations);
 351     sync_event.commit();
 352   }
 353 
 354   EventSafepointWaitBlocked wait_blocked_event;
 355   int initial_waiting_to_block = _waiting_to_block;
 356   // wait until all threads are stopped
 357   while (_waiting_to_block > 0) {
 358     if (TraceSafepoint) tty->print_cr("Waiting for %d thread(s) to block", _waiting_to_block);
 359     if (!SafepointTimeout || timeout_error_printed) {
 360       Safepoint_lock->wait(true);  // true, means with no safepoint checks
 361     } else {
 362       // Compute remaining time
 363       jlong remaining_time = safepoint_limit_time - os::javaTimeNanos();
 364 
 365       // If there is no remaining time, then there is an error
 366       if (remaining_time < 0 || Safepoint_lock->wait(true, remaining_time / MICROUNITS)) {
 367         print_safepoint_timeout(_blocking_timeout);
 368       }
 369     }
 370   }
 371   assert(_waiting_to_block == 0, "sanity check");
 372 
 373 #ifndef PRODUCT
 374   if (SafepointTimeout) {
 375     jlong current_time = os::javaTimeNanos();
 376     if (safepoint_limit_time < current_time) {
 377       tty->print_cr("# SafepointSynchronize: Finished after "
 378                     INT64_FORMAT_W(6) " ms",
 379                     ((current_time - safepoint_limit_time) / MICROUNITS +
 380                      SafepointTimeoutDelay));
 381     }
 382   }
 383 #endif
 384 
 385   assert((_safepoint_counter & 0x1) == 0, "must be even");
 386   assert(Threads_lock->owned_by_self(), "must hold Threads_lock");
 387   _safepoint_counter ++;
 388 
 389   // Record state
 390   _state = _synchronized;
 391 
 392   OrderAccess::fence();
 393 
 394   if (wait_blocked_event.should_commit()) {
 395     wait_blocked_event.set_safepointId(safepoint_counter());
 396     wait_blocked_event.set_runningThreadCount(initial_waiting_to_block);
 397     wait_blocked_event.commit();
 398   }
 399 
 400 #ifdef ASSERT
 401   for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
 402     // make sure all the threads were visited
 403     assert(cur->was_visited_for_critical_count(), "missed a thread");
 404   }
 405 #endif // ASSERT
 406 
 407   // Update the count of active JNI critical regions
 408   GC_locker::set_jni_lock_count(_current_jni_active_count);
 409 
 410   if (TraceSafepoint) {
 411     VM_Operation *op = VMThread::vm_operation();
 412     tty->print_cr("Entering safepoint region: %s", (op != NULL) ? op->name() : "no vm operation");
 413   }
 414 
 415   RuntimeService::record_safepoint_synchronized();
 416   if (PrintSafepointStatistics) {
 417     update_statistics_on_sync_end(os::javaTimeNanos());
 418   }
 419 
 420   // Call stuff that needs to be run when a safepoint is just about to be completed
 421   EventSafepointCleanup cleanup_event;
 422   do_cleanup_tasks();
 423 
 424   if (cleanup_event.should_commit()) {
 425     cleanup_event.set_safepointId(safepoint_counter());
 426     cleanup_event.commit();
 427   }
 428 
 429   if (PrintSafepointStatistics) {
 430     // Record how much time was spent on the above cleanup tasks
 431     update_statistics_on_cleanup_end(os::javaTimeNanos());
 432   }
 433 
 434   if (begin_event.should_commit()) {
 435     begin_event.set_safepointId(safepoint_counter());
 436     begin_event.set_totalThreadCount(nof_threads);
 437     begin_event.set_jniCriticalThreadCount(_current_jni_active_count);
 438     begin_event.commit();
 439   }
 440 }
 441 
 442 // Wake up all threads, so they are ready to resume execution after the safepoint
 443 // operation has been carried out
 444 void SafepointSynchronize::end() {
 445   EventSafepointEnd event;
 446   int safepoint_id = safepoint_counter(); // Keep the odd counter as "id"
 447 
 448   assert(Threads_lock->owned_by_self(), "must hold Threads_lock");
 449   assert((_safepoint_counter & 0x1) == 1, "must be odd");
 450   _safepoint_counter ++;
 451   // memory fence isn't required here since an odd _safepoint_counter
 452   // value can do no harm and a fence is issued below anyway.
 453 
 454   DEBUG_ONLY(Thread* myThread = Thread::current();)
 455   assert(myThread->is_VM_thread(), "Only VM thread can execute a safepoint");
 456 
 457   if (PrintSafepointStatistics) {
 458     end_statistics(os::javaTimeNanos());
 459   }
 460 
 461 #ifdef ASSERT
 462   // A pending_exception cannot be installed during a safepoint.  The threads
 463   // may install an async exception into pending_exception after they come back
 464   // from a safepoint and unblock.  But that should happen later.
 465   for(JavaThread *cur = Threads::first(); cur; cur = cur->next()) {
 466     assert (!(cur->has_pending_exception() &&


 515     }
 516 
 517     RuntimeService::record_safepoint_end();
 518 
 519     // Release the Threads_lock, so threads can be created/destroyed again. It will also start all threads
 520     // blocked in signal_thread_blocked
 521     Threads_lock->unlock();
 522 
 523   }
 524 #if INCLUDE_ALL_GCS
 525   // If there are any concurrent GC threads resume them.
 526   if (UseConcMarkSweepGC) {
 527     ConcurrentMarkSweepThread::desynchronize(false);
 528   } else if (UseG1GC) {
 529     SuspendibleThreadSet::desynchronize();
 530   }
 531 #endif // INCLUDE_ALL_GCS
 532   // record this time so VMThread can keep track of how much time has elapsed
 533   // since the last safepoint.
 534   _end_of_last_safepoint = os::javaTimeMillis();
 535 
 536   if (event.should_commit()) {
 537     event.set_safepointId(safepoint_id);
 538     event.commit();
 539   }
 540 }
 541 
 542 bool SafepointSynchronize::is_cleanup_needed() {
 543   // Need a safepoint if the inline cache buffer is non-empty
 544   if (!InlineCacheBuffer::is_empty()) return true;
 545   return false;
 546 }
 547 
 548 static void event_safepoint_cleanup_task_commit(EventSafepointCleanupTask& event, const char* name) {
 549   if (event.should_commit()) {
 550     event.set_safepointId(SafepointSynchronize::safepoint_counter());
 551     event.set_name(name);
 552     event.commit();
 553   }
 554 }
 555 
 556 // Various cleaning tasks that should be done periodically at safepoints
 557 void SafepointSynchronize::do_cleanup_tasks() {
 558   {
 559     const char* name = "deflating idle monitors";
 560     EventSafepointCleanupTask event;
 561     TraceTime t1(name, TraceSafepointCleanupTime);
 562     ObjectSynchronizer::deflate_idle_monitors();
 563     event_safepoint_cleanup_task_commit(event, name);
 564   }
 565 
 566   {
 567     const char* name = "updating inline caches";
 568     EventSafepointCleanupTask event;
 569     TraceTime t2(name, TraceSafepointCleanupTime);
 570     InlineCacheBuffer::update_inline_caches();
 571     event_safepoint_cleanup_task_commit(event, name);
 572   }
 573   {
 574     const char* name = "compilation policy safepoint handler";
 575     EventSafepointCleanupTask event;
 576     TraceTime t3(name, TraceSafepointCleanupTime);
 577     CompilationPolicy::policy()->do_safepoint_work();
 578     event_safepoint_cleanup_task_commit(event, name);
 579   }
 580 
 581   {
 582     const char* name = "mark nmethods";
 583     EventSafepointCleanupTask event;
 584     TraceTime t4(name, TraceSafepointCleanupTime);
 585     NMethodSweeper::mark_active_nmethods();
 586     event_safepoint_cleanup_task_commit(event, name);
 587   }
 588 
 589   if (SymbolTable::needs_rehashing()) {
 590     const char* name = "rehashing symbol table";
 591     EventSafepointCleanupTask event;
 592     TraceTime t5(name, TraceSafepointCleanupTime);
 593     SymbolTable::rehash_table();
 594     event_safepoint_cleanup_task_commit(event, name);
 595   }
 596 
 597   if (StringTable::needs_rehashing()) {
 598     const char* name = "rehashing string table";
 599     EventSafepointCleanupTask event;
 600     TraceTime t6(name, TraceSafepointCleanupTime);
 601     StringTable::rehash_table();
 602     event_safepoint_cleanup_task_commit(event, name);
 603   }
 604 
 605   // rotate log files?
 606   if (UseGCLogFileRotation) {
 607     const char* name = "rotate gc log";
 608     EventSafepointCleanupTask event;
 609     gclog_or_tty->rotate_log(false);
 610     event_safepoint_cleanup_task_commit(event, name);
 611   }
 612 
 613   {
 614     // CMS delays purging the CLDG until the beginning of the next safepoint to
 615     // make sure the concurrent sweep is done
 616     const char* name = "purging class loader data graph";
 617     EventSafepointCleanupTask event;
 618     TraceTime t7(name, TraceSafepointCleanupTime);
 619     ClassLoaderDataGraph::purge_if_needed();
 620     event_safepoint_cleanup_task_commit(event, name);
 621   }
 622 }
 623 
 624 
 625 bool SafepointSynchronize::safepoint_safe(JavaThread *thread, JavaThreadState state) {
 626   switch(state) {
 627   case _thread_in_native:
 628     // native threads are safe if they have no Java stack or have a walkable stack
 629     return !thread->has_last_Java_frame() || thread->frame_anchor()->walkable();
 630 
 631    // blocked threads should already have a walkable stack
 632   case _thread_blocked:
 633     assert(!thread->has_last_Java_frame() || thread->frame_anchor()->walkable(), "blocked and not walkable");
 634     return true;
 635 
 636   default:
 637     return false;
 638   }
 639 }
 640 
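In the patched do_cleanup_tasks() above, every subtask follows the same shape: create an EventSafepointCleanupTask, keep the existing TraceTime scope, do the work, then commit through the new event_safepoint_cleanup_task_commit() helper. As a hypothetical illustration (not part of this patch), one more subtask added inside do_cleanup_tasks() would look like the snippet below; do_extra_cleanup() is an invented placeholder for whatever work such a task would perform.

  {
    const char* name = "hypothetical extra cleanup task";
    EventSafepointCleanupTask event;                     // JFR event, same as the other subtasks
    TraceTime t8(name, TraceSafepointCleanupTime);       // optional timing, matching the patch style
    do_extra_cleanup();                                  // placeholder; not a real HotSpot function
    event_safepoint_cleanup_task_commit(event, name);    // sets safepointId and name, then commits
  }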

