< prev index next >

src/share/vm/runtime/safepoint.cpp

Print this page




  38 #include "memory/resourceArea.hpp"
  39 #include "memory/universe.inline.hpp"
  40 #include "oops/oop.inline.hpp"
  41 #include "oops/symbol.hpp"
  42 #include "runtime/atomic.inline.hpp"
  43 #include "runtime/compilationPolicy.hpp"
  44 #include "runtime/deoptimization.hpp"
  45 #include "runtime/frame.inline.hpp"
  46 #include "runtime/interfaceSupport.hpp"
  47 #include "runtime/mutexLocker.hpp"
  48 #include "runtime/orderAccess.inline.hpp"
  49 #include "runtime/osThread.hpp"
  50 #include "runtime/safepoint.hpp"
  51 #include "runtime/signature.hpp"
  52 #include "runtime/stubCodeGenerator.hpp"
  53 #include "runtime/stubRoutines.hpp"
  54 #include "runtime/sweeper.hpp"
  55 #include "runtime/synchronizer.hpp"
  56 #include "runtime/thread.inline.hpp"
  57 #include "services/runtimeService.hpp"


  58 #include "utilities/events.hpp"
  59 #include "utilities/macros.hpp"
  60 #if INCLUDE_ALL_GCS
  61 #include "gc/cms/concurrentMarkSweepThread.hpp"
  62 #include "gc/g1/suspendibleThreadSet.hpp"
  63 #endif // INCLUDE_ALL_GCS
  64 #ifdef COMPILER1
  65 #include "c1/c1_globals.hpp"
  66 #endif
  67 
  68 // --------------------------------------------------------------------------------------------------
  69 // Implementation of Safepoint begin/end
  70 
  71 SafepointSynchronize::SynchronizeState volatile SafepointSynchronize::_state = SafepointSynchronize::_not_synchronized;
  72 volatile int  SafepointSynchronize::_waiting_to_block = 0;
  73 volatile int SafepointSynchronize::_safepoint_counter = 0;
  74 int SafepointSynchronize::_current_jni_active_count = 0;
  75 long  SafepointSynchronize::_end_of_last_safepoint = 0;
  76 static volatile int PageArmed = 0 ;        // safepoint polling page is RO|RW vs PROT_NONE
  77 static volatile int TryingToBlock = 0 ;    // proximate value -- for advisory use only
  78 static bool timeout_error_printed = false;
  79 
  80 // Roll all threads forward to a safepoint and suspend them all
  81 void SafepointSynchronize::begin() {
  82 
  83   Thread* myThread = Thread::current();
  84   assert(myThread->is_VM_thread(), "Only VM thread may execute a safepoint");
  85 
  86   if (PrintSafepointStatistics || PrintSafepointStatisticsTimeout > 0) {
  87     _safepoint_begin_time = os::javaTimeNanos();
  88     _ts_of_current_safepoint = tty->time_stamp().seconds();
  89   }
  90 
  91 #if INCLUDE_ALL_GCS
  92   if (UseConcMarkSweepGC) {
  93     // In the future we should investigate whether CMS can use the
  94     // more-general mechanism below.  DLD (01/05).
  95     ConcurrentMarkSweepThread::synchronize(false);
  96   } else if (UseG1GC) {
  97     SuspendibleThreadSet::synchronize();
  98   }
  99 #endif // INCLUDE_ALL_GCS
 100 
 101   // By getting the Threads_lock, we assure that no threads are about to start or
 102   // exit. It is released again in SafepointSynchronize::end().
 103   Threads_lock->lock();
 104 
 105   assert( _state == _not_synchronized, "trying to safepoint synchronize with wrong state");


 152   //     (on MP systems).  In order to avoid the overhead of issuing
 153   //     a memory barrier for each Java thread making native calls, each Java
 154   //     thread performs a write to a single memory page after changing
 155   //     the thread state.  The VM thread performs a sequence of
 156   //     mprotect OS calls which forces all previous writes from all
 157   //     Java threads to be serialized.  This is done in the
 158   //     os::serialize_thread_states() call.  This has proven to be
 159   //     much more efficient than executing a membar instruction
 160   //     on every call to native code.
 161   //  3. Running compiled Code
 162   //     Compiled code reads a global (Safepoint Polling) page that
 163   //     is set to fault if we are trying to get to a safepoint.
 164   //  4. Blocked
 165   //     A thread which is blocked will not be allowed to return from the
 166   //     block condition until the safepoint operation is complete.
 167   //  5. In VM or Transitioning between states
 168   //     If a Java thread is currently running in the VM or transitioning
 169   //     between states, the safepointing code will wait for the thread to
 170   //     block itself when it attempts transitions to a new state.
 171   //




 172   _state            = _synchronizing;
 173   OrderAccess::fence();
 174 
 175   // Flush all thread states to memory
 176   if (!UseMembar) {
 177     os::serialize_thread_states();
 178   }
 179 
 180   // Make interpreter safepoint aware
 181   Interpreter::notice_safepoints();
 182 
 183   if (DeferPollingPageLoopCount < 0) {
 184     // Make polling safepoint aware
 185     guarantee (PageArmed == 0, "invariant") ;
 186     PageArmed = 1 ;
 187     os::make_polling_page_unreadable();
 188   }
 189 
 190   // Consider using active_processor_count() ... but that call is expensive.
 191   int ncpus = os::processor_count() ;


 209       assert(!cur->is_ConcurrentGC_thread(), "A concurrent GC thread is unexpectly being suspended");
 210       ThreadSafepointState *cur_state = cur->safepoint_state();
 211       if (cur_state->is_running()) {
 212         cur_state->examine_state_of_thread();
 213         if (!cur_state->is_running()) {
 214            still_running--;
 215            // consider adjusting steps downward:
 216            //   steps = 0
 217            //   steps -= NNN
 218            //   steps >>= 1
 219            //   steps = MIN(steps, 2000-100)
 220            //   if (iterations != 0) steps -= NNN
 221         }
 222         if (log_is_enabled(Trace, safepoint)) {
 223           ResourceMark rm;
 224           cur_state->print_on(LogHandle(safepoint)::debug_stream());
 225         }
 226       }
 227     }
 228 
 229     if (PrintSafepointStatistics && iterations == 0) {


 230       begin_statistics(nof_threads, still_running);
 231     }

 232 
 233     if (still_running > 0) {
  234       // Check if it takes too long
 235       if (SafepointTimeout && safepoint_limit_time < os::javaTimeNanos()) {
 236         print_safepoint_timeout(_spinning_timeout);
 237       }
 238 
 239       // Spin to avoid context switching.
 240       // There's a tension between allowing the mutators to run (and rendezvous)
 241       // vs spinning.  As the VM thread spins, wasting cycles, it consumes CPU that
 242       // a mutator might otherwise use profitably to reach a safepoint.  Excessive
 243       // spinning by the VM thread on a saturated system can increase rendezvous latency.
 244       // Blocking or yielding incur their own penalties in the form of context switching
 245       // and the resultant loss of $ residency.
 246       //
 247       // Further complicating matters is that yield() does not work as naively expected
 248       // on many platforms -- yield() does not guarantee that any other ready threads
 249       // will run.   As such we revert to naked_short_sleep() after some number of iterations.
  250       // naked_short_sleep() is implemented as a short unconditional sleep.
 251       // Typical operating systems round a "short" sleep period up to 10 msecs, so sleeping


 300       ++steps ;
 301       if (ncpus > 1 && steps < SafepointSpinBeforeYield) {
 302         SpinPause() ;     // MP-Polite spin
 303       } else
 304       if (steps < DeferThrSuspendLoopCount) {
 305         os::naked_yield() ;
 306       } else {
 307         os::naked_short_sleep(1);
 308       }
 309 
 310       iterations ++ ;
 311     }
 312     assert(iterations < (uint)max_jint, "We have been iterating in the safepoint loop too long");
 313   }
 314   assert(still_running == 0, "sanity check");
 315 
 316   if (PrintSafepointStatistics) {
 317     update_statistics_on_spin_end();
 318   }
 319 









 320   // wait until all threads are stopped




 321   while (_waiting_to_block > 0) {
 322     log_debug(safepoint)("Waiting for %d thread(s) to block", _waiting_to_block);
 323     if (!SafepointTimeout || timeout_error_printed) {
 324       Safepoint_lock->wait(true);  // true, means with no safepoint checks
 325     } else {
 326       // Compute remaining time
 327       jlong remaining_time = safepoint_limit_time - os::javaTimeNanos();
 328 
 329       // If there is no remaining time, then there is an error
 330       if (remaining_time < 0 || Safepoint_lock->wait(true, remaining_time / MICROUNITS)) {
 331         print_safepoint_timeout(_blocking_timeout);
 332       }
 333     }
 334   }
 335   assert(_waiting_to_block == 0, "sanity check");
 336 
 337 #ifndef PRODUCT
 338   if (SafepointTimeout) {
 339     jlong current_time = os::javaTimeNanos();
 340     if (safepoint_limit_time < current_time) {
 341       tty->print_cr("# SafepointSynchronize: Finished after "
 342                     INT64_FORMAT_W(6) " ms",
 343                     ((current_time - safepoint_limit_time) / MICROUNITS +
 344                      (jlong)SafepointTimeoutDelay));
 345     }
 346   }
 347 #endif
 348 
 349   assert((_safepoint_counter & 0x1) == 0, "must be even");
 350   assert(Threads_lock->owned_by_self(), "must hold Threads_lock");
 351   _safepoint_counter ++;
 352 
 353   // Record state
 354   _state = _synchronized;
 355 
 356   OrderAccess::fence();






 357 
 358 #ifdef ASSERT
 359   for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
 360     // make sure all the threads were visited
 361     assert(cur->was_visited_for_critical_count(), "missed a thread");
 362   }
 363 #endif // ASSERT
 364 
 365   // Update the count of active JNI critical regions
 366   GCLocker::set_jni_lock_count(_current_jni_active_count);
 367 
 368   if (log_is_enabled(Debug, safepoint)) {
 369     VM_Operation *op = VMThread::vm_operation();
 370     log_debug(safepoint)("Entering safepoint region: %s",
 371                          (op != NULL) ? op->name() : "no vm operation");
 372   }
 373 
 374   RuntimeService::record_safepoint_synchronized();
 375   if (PrintSafepointStatistics) {
 376     update_statistics_on_sync_end(os::javaTimeNanos());
 377   }
 378 
 379   // Call stuff that needs to be run when a safepoint is just about to be completed


 380   do_cleanup_tasks();





 381 
 382   if (PrintSafepointStatistics) {
 383     // Record how much time spend on the above cleanup tasks
 384     update_statistics_on_cleanup_end(os::javaTimeNanos());
 385   }






 386 }
 387 
 388 // Wake up all threads, so they are ready to resume execution after the safepoint
 389 // operation has been carried out
 390 void SafepointSynchronize::end() {


 391 
 392   assert(Threads_lock->owned_by_self(), "must hold Threads_lock");
 393   assert((_safepoint_counter & 0x1) == 1, "must be odd");
 394   _safepoint_counter ++;
 395   // memory fence isn't required here since an odd _safepoint_counter
 396   // value can do no harm and a fence is issued below anyway.
 397 
 398   DEBUG_ONLY(Thread* myThread = Thread::current();)
 399   assert(myThread->is_VM_thread(), "Only VM thread can execute a safepoint");
 400 
 401   if (PrintSafepointStatistics) {
 402     end_statistics(os::javaTimeNanos());
 403   }
 404 
 405 #ifdef ASSERT
 406   // A pending_exception cannot be installed during a safepoint.  The threads
 407   // may install an async exception after they come back from a safepoint into
 408   // pending_exception after they unblock.  But that should happen later.
 409   for(JavaThread *cur = Threads::first(); cur; cur = cur->next()) {
 410     assert (!(cur->has_pending_exception() &&


 457     }
 458 
 459     RuntimeService::record_safepoint_end();
 460 
  461     // Release threads lock, so threads can be created/destroyed again. It will also start all threads
 462     // blocked in signal_thread_blocked
 463     Threads_lock->unlock();
 464 
 465   }
 466 #if INCLUDE_ALL_GCS
 467   // If there are any concurrent GC threads resume them.
 468   if (UseConcMarkSweepGC) {
 469     ConcurrentMarkSweepThread::desynchronize(false);
 470   } else if (UseG1GC) {
 471     SuspendibleThreadSet::desynchronize();
 472   }
 473 #endif // INCLUDE_ALL_GCS
 474   // record this time so VMThread can keep track how much time has elapsed
 475   // since last safepoint.
 476   _end_of_last_safepoint = os::javaTimeMillis();





 477 }
 478 
 479 bool SafepointSynchronize::is_cleanup_needed() {
 480   // Need a safepoint if some inline cache buffers is non-empty
 481   if (!InlineCacheBuffer::is_empty()) return true;
 482   return false;
 483 }
 484 
 485 
 486 
  487 // Various cleaning tasks that should be done periodically at safepoints
  488 void SafepointSynchronize::do_cleanup_tasks() {
      // Each task runs inside its own block scope so its TraceTime timer is
      // destroyed (and the elapsed time reported) as soon as the task finishes,
      // when TraceSafepointCleanupTime is enabled.
  489   {
  490     TraceTime t1("deflating idle monitors", TraceSafepointCleanupTime);


  491     ObjectSynchronizer::deflate_idle_monitors();




  492   }
  493 
  494   {
  495     TraceTime t2("updating inline caches", TraceSafepointCleanupTime);


  496     InlineCacheBuffer::update_inline_caches();




  497   }
  498   {
  499     TraceTime t3("compilation policy safepoint handler", TraceSafepointCleanupTime);


  500     CompilationPolicy::policy()->do_safepoint_work();




  501   }
  502 
  503   {
  504     TraceTime t4("mark nmethods", TraceSafepointCleanupTime);


  505     NMethodSweeper::mark_active_nmethods();




  506   }
  507 
      // The two rehashing tasks are conditional: they only run when the table
      // itself reports that rehashing is required.
  508   if (SymbolTable::needs_rehashing()) {
  509     TraceTime t5("rehashing symbol table", TraceSafepointCleanupTime);


  510     SymbolTable::rehash_table();




  511   }
  512 
  513   if (StringTable::needs_rehashing()) {
  514     TraceTime t6("rehashing string table", TraceSafepointCleanupTime);


  515     StringTable::rehash_table();




  516   }
  517 
  518   {
  519     // CMS delays purging the CLDG until the beginning of the next safepoint and to
  520     // make sure concurrent sweep is done
  521     TraceTime t7("purging class loader data graph", TraceSafepointCleanupTime);


  522     ClassLoaderDataGraph::purge_if_needed();




  523   }
  524 }
 525 
 526 
  527 bool SafepointSynchronize::safepoint_safe(JavaThread *thread, JavaThreadState state) {
      // Returns true if the given thread, in the given state, is already
      // safepoint-safe without being stopped: its state guarantees it is not
      // executing Java code or mutating VM-visible state.
  528   switch(state) {
  529   case _thread_in_native:
  530     // native threads are safe if they have no java stack or have walkable stack
  531     return !thread->has_last_Java_frame() || thread->frame_anchor()->walkable();
  532 
  533    // blocked threads should already have a walkable stack
  534   case _thread_blocked:
  535     assert(!thread->has_last_Java_frame() || thread->frame_anchor()->walkable(), "blocked and not walkable");
  536     return true;
  537 
  538   default:
      // Any other state (in Java, in VM, or transitioning) must be explicitly
      // brought to the safepoint.
  539     return false;
  540   }
  541 }
 542 




  38 #include "memory/resourceArea.hpp"
  39 #include "memory/universe.inline.hpp"
  40 #include "oops/oop.inline.hpp"
  41 #include "oops/symbol.hpp"
  42 #include "runtime/atomic.inline.hpp"
  43 #include "runtime/compilationPolicy.hpp"
  44 #include "runtime/deoptimization.hpp"
  45 #include "runtime/frame.inline.hpp"
  46 #include "runtime/interfaceSupport.hpp"
  47 #include "runtime/mutexLocker.hpp"
  48 #include "runtime/orderAccess.inline.hpp"
  49 #include "runtime/osThread.hpp"
  50 #include "runtime/safepoint.hpp"
  51 #include "runtime/signature.hpp"
  52 #include "runtime/stubCodeGenerator.hpp"
  53 #include "runtime/stubRoutines.hpp"
  54 #include "runtime/sweeper.hpp"
  55 #include "runtime/synchronizer.hpp"
  56 #include "runtime/thread.inline.hpp"
  57 #include "services/runtimeService.hpp"
  58 #include "trace/tracing.hpp"
  59 #include "trace/traceMacros.hpp"
  60 #include "utilities/events.hpp"
  61 #include "utilities/macros.hpp"
  62 #if INCLUDE_ALL_GCS
  63 #include "gc/cms/concurrentMarkSweepThread.hpp"
  64 #include "gc/g1/suspendibleThreadSet.hpp"
  65 #endif // INCLUDE_ALL_GCS
  66 #ifdef COMPILER1
  67 #include "c1/c1_globals.hpp"
  68 #endif
  69 
  70 // --------------------------------------------------------------------------------------------------
  71 // Implementation of Safepoint begin/end
  72 
  73 SafepointSynchronize::SynchronizeState volatile SafepointSynchronize::_state = SafepointSynchronize::_not_synchronized;
  74 volatile int  SafepointSynchronize::_waiting_to_block = 0;
  75 volatile int SafepointSynchronize::_safepoint_counter = 0;
  76 int SafepointSynchronize::_current_jni_active_count = 0;
  77 long  SafepointSynchronize::_end_of_last_safepoint = 0;
  78 static volatile int PageArmed = 0 ;        // safepoint polling page is RO|RW vs PROT_NONE
  79 static volatile int TryingToBlock = 0 ;    // proximate value -- for advisory use only
  80 static bool timeout_error_printed = false;
  81 
  82 // Roll all threads forward to a safepoint and suspend them all
  83 void SafepointSynchronize::begin() {
  84   EventSafepointBegin begin_event;
  85   Thread* myThread = Thread::current();
  86   assert(myThread->is_VM_thread(), "Only VM thread may execute a safepoint");

  87   if (PrintSafepointStatistics || PrintSafepointStatisticsTimeout > 0) {
  88     _safepoint_begin_time = os::javaTimeNanos();
  89     _ts_of_current_safepoint = tty->time_stamp().seconds();
  90   }
  91 
  92 #if INCLUDE_ALL_GCS
  93   if (UseConcMarkSweepGC) {
  94     // In the future we should investigate whether CMS can use the
  95     // more-general mechanism below.  DLD (01/05).
  96     ConcurrentMarkSweepThread::synchronize(false);
  97   } else if (UseG1GC) {
  98     SuspendibleThreadSet::synchronize();
  99   }
 100 #endif // INCLUDE_ALL_GCS
 101 
 102   // By getting the Threads_lock, we assure that no threads are about to start or
 103   // exit. It is released again in SafepointSynchronize::end().
 104   Threads_lock->lock();
 105 
 106   assert( _state == _not_synchronized, "trying to safepoint synchronize with wrong state");


 153   //     (on MP systems).  In order to avoid the overhead of issuing
 154   //     a memory barrier for each Java thread making native calls, each Java
 155   //     thread performs a write to a single memory page after changing
 156   //     the thread state.  The VM thread performs a sequence of
 157   //     mprotect OS calls which forces all previous writes from all
 158   //     Java threads to be serialized.  This is done in the
 159   //     os::serialize_thread_states() call.  This has proven to be
 160   //     much more efficient than executing a membar instruction
 161   //     on every call to native code.
 162   //  3. Running compiled Code
 163   //     Compiled code reads a global (Safepoint Polling) page that
 164   //     is set to fault if we are trying to get to a safepoint.
 165   //  4. Blocked
 166   //     A thread which is blocked will not be allowed to return from the
 167   //     block condition until the safepoint operation is complete.
 168   //  5. In VM or Transitioning between states
 169   //     If a Java thread is currently running in the VM or transitioning
 170   //     between states, the safepointing code will wait for the thread to
 171   //     block itself when it attempts transitions to a new state.
 172   //
 173   {
 174     EventSafepointStateSync sync_event;
 175     int initial_running = 0;
 176 
 177     _state            = _synchronizing;
 178     OrderAccess::fence();
 179 
 180     // Flush all thread states to memory
 181     if (!UseMembar) {
 182       os::serialize_thread_states();
 183     }
 184 
 185     // Make interpreter safepoint aware
 186     Interpreter::notice_safepoints();
 187 
 188     if (DeferPollingPageLoopCount < 0) {
 189       // Make polling safepoint aware
 190       guarantee (PageArmed == 0, "invariant") ;
 191       PageArmed = 1 ;
 192       os::make_polling_page_unreadable();
 193     }
 194 
 195     // Consider using active_processor_count() ... but that call is expensive.
 196     int ncpus = os::processor_count() ;


 214         assert(!cur->is_ConcurrentGC_thread(), "A concurrent GC thread is unexpectly being suspended");
 215         ThreadSafepointState *cur_state = cur->safepoint_state();
 216         if (cur_state->is_running()) {
 217           cur_state->examine_state_of_thread();
 218           if (!cur_state->is_running()) {
 219             still_running--;
 220             // consider adjusting steps downward:
 221             //   steps = 0
 222             //   steps -= NNN
 223             //   steps >>= 1
 224             //   steps = MIN(steps, 2000-100)
 225             //   if (iterations != 0) steps -= NNN
 226           }
 227           if (log_is_enabled(Trace, safepoint)) {
 228             ResourceMark rm;
 229             cur_state->print_on(LogHandle(safepoint)::debug_stream());
 230           }
 231         }
 232       }
 233 
 234       if (iterations == 0) {
 235         initial_running = still_running;
 236         if (PrintSafepointStatistics) {
 237           begin_statistics(nof_threads, still_running);
 238         }
 239       }
 240 
 241       if (still_running > 0) {
  242         // Check if it takes too long
 243         if (SafepointTimeout && safepoint_limit_time < os::javaTimeNanos()) {
 244           print_safepoint_timeout(_spinning_timeout);
 245         }
 246 
 247         // Spin to avoid context switching.
 248         // There's a tension between allowing the mutators to run (and rendezvous)
 249         // vs spinning.  As the VM thread spins, wasting cycles, it consumes CPU that
 250         // a mutator might otherwise use profitably to reach a safepoint.  Excessive
 251         // spinning by the VM thread on a saturated system can increase rendezvous latency.
 252         // Blocking or yielding incur their own penalties in the form of context switching
 253         // and the resultant loss of $ residency.
 254         //
 255         // Further complicating matters is that yield() does not work as naively expected
 256         // on many platforms -- yield() does not guarantee that any other ready threads
 257         // will run.   As such we revert to naked_short_sleep() after some number of iterations.
  258         // naked_short_sleep() is implemented as a short unconditional sleep.
 259         // Typical operating systems round a "short" sleep period up to 10 msecs, so sleeping


 308         ++steps ;
 309         if (ncpus > 1 && steps < SafepointSpinBeforeYield) {
 310           SpinPause() ;     // MP-Polite spin
 311         } else
 312           if (steps < DeferThrSuspendLoopCount) {
 313             os::naked_yield() ;
 314           } else {
 315             os::naked_short_sleep(1);
 316           }
 317 
 318         iterations ++ ;
 319       }
 320       assert(iterations < (uint)max_jint, "We have been iterating in the safepoint loop too long");
 321     }
 322     assert(still_running == 0, "sanity check");
 323 
 324     if (PrintSafepointStatistics) {
 325       update_statistics_on_spin_end();
 326     }
 327 
 328     if (sync_event.should_commit()) {
 329       sync_event.set_safepointId(safepoint_counter());
 330       sync_event.set_initialThreadCount(initial_running);
 331       sync_event.set_runningThreadCount(_waiting_to_block);
 332       sync_event.set_iterations(iterations);
 333       sync_event.commit();
 334     }
 335   } //EventSafepointStateSync
 336 
 337   // wait until all threads are stopped
 338   {
 339     EventSafepointWaitBlocked wait_blocked_event;
 340     int initial_waiting_to_block = _waiting_to_block;
 341 
 342     while (_waiting_to_block > 0) {
 343       log_debug(safepoint)("Waiting for %d thread(s) to block", _waiting_to_block);
 344       if (!SafepointTimeout || timeout_error_printed) {
 345         Safepoint_lock->wait(true);  // true, means with no safepoint checks
 346       } else {
 347         // Compute remaining time
 348         jlong remaining_time = safepoint_limit_time - os::javaTimeNanos();
 349 
 350         // If there is no remaining time, then there is an error
 351         if (remaining_time < 0 || Safepoint_lock->wait(true, remaining_time / MICROUNITS)) {
 352           print_safepoint_timeout(_blocking_timeout);
 353         }
 354       }
 355     }
 356     assert(_waiting_to_block == 0, "sanity check");
 357 
 358 #ifndef PRODUCT
 359     if (SafepointTimeout) {
 360       jlong current_time = os::javaTimeNanos();
 361       if (safepoint_limit_time < current_time) {
 362         tty->print_cr("# SafepointSynchronize: Finished after "
 363                       INT64_FORMAT_W(6) " ms",
 364                       ((current_time - safepoint_limit_time) / MICROUNITS +
 365                        (jlong)SafepointTimeoutDelay));
 366       }
 367     }
 368 #endif
 369 
 370     assert((_safepoint_counter & 0x1) == 0, "must be even");
 371     assert(Threads_lock->owned_by_self(), "must hold Threads_lock");
 372     _safepoint_counter ++;
 373 
 374     // Record state
 375     _state = _synchronized;
 376 
 377     OrderAccess::fence();
 378     if (wait_blocked_event.should_commit()) {
 379       wait_blocked_event.set_safepointId(safepoint_counter());
 380       wait_blocked_event.set_runningThreadCount(initial_waiting_to_block);
 381       wait_blocked_event.commit();
 382     }
 383   } // EventSafepointWaitBlocked
 384 
 385 #ifdef ASSERT
 386   for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
 387     // make sure all the threads were visited
 388     assert(cur->was_visited_for_critical_count(), "missed a thread");
 389   }
 390 #endif // ASSERT
 391 
 392   // Update the count of active JNI critical regions
 393   GCLocker::set_jni_lock_count(_current_jni_active_count);
 394 
 395   if (log_is_enabled(Debug, safepoint)) {
 396     VM_Operation *op = VMThread::vm_operation();
 397     log_debug(safepoint)("Entering safepoint region: %s",
 398                          (op != NULL) ? op->name() : "no vm operation");
 399   }
 400 
 401   RuntimeService::record_safepoint_synchronized();
 402   if (PrintSafepointStatistics) {
 403     update_statistics_on_sync_end(os::javaTimeNanos());
 404   }
 405 
 406   // Call stuff that needs to be run when a safepoint is just about to be completed
 407   {
 408     EventSafepointCleanup cleanup_event;
 409     do_cleanup_tasks();
 410     if (cleanup_event.should_commit()) {
 411       cleanup_event.set_safepointId(safepoint_counter());
 412       cleanup_event.commit();
 413     }
 414   }
 415 
 416   if (PrintSafepointStatistics) {
 417     // Record how much time spend on the above cleanup tasks
 418     update_statistics_on_cleanup_end(os::javaTimeNanos());
 419   }
 420   if (begin_event.should_commit()) {
 421     begin_event.set_safepointId(safepoint_counter());
 422     begin_event.set_totalThreadCount(nof_threads);
 423     begin_event.set_jniCriticalThreadCount(_current_jni_active_count);
 424     begin_event.commit();
 425   }
 426 }
 427 
 428 // Wake up all threads, so they are ready to resume execution after the safepoint
 429 // operation has been carried out
 430 void SafepointSynchronize::end() {
 431   EventSafepointEnd event;
 432   int safepoint_id = safepoint_counter(); // Keep the odd counter as "id"
 433 
 434   assert(Threads_lock->owned_by_self(), "must hold Threads_lock");
 435   assert((_safepoint_counter & 0x1) == 1, "must be odd");
 436   _safepoint_counter ++;
 437   // memory fence isn't required here since an odd _safepoint_counter
 438   // value can do no harm and a fence is issued below anyway.
 439 
 440   DEBUG_ONLY(Thread* myThread = Thread::current();)
 441   assert(myThread->is_VM_thread(), "Only VM thread can execute a safepoint");
 442 
 443   if (PrintSafepointStatistics) {
 444     end_statistics(os::javaTimeNanos());
 445   }
 446 
 447 #ifdef ASSERT
 448   // A pending_exception cannot be installed during a safepoint.  The threads
 449   // may install an async exception after they come back from a safepoint into
 450   // pending_exception after they unblock.  But that should happen later.
 451   for(JavaThread *cur = Threads::first(); cur; cur = cur->next()) {
 452     assert (!(cur->has_pending_exception() &&


 499     }
 500 
 501     RuntimeService::record_safepoint_end();
 502 
  503     // Release threads lock, so threads can be created/destroyed again. It will also start all threads
 504     // blocked in signal_thread_blocked
 505     Threads_lock->unlock();
 506 
 507   }
 508 #if INCLUDE_ALL_GCS
 509   // If there are any concurrent GC threads resume them.
 510   if (UseConcMarkSweepGC) {
 511     ConcurrentMarkSweepThread::desynchronize(false);
 512   } else if (UseG1GC) {
 513     SuspendibleThreadSet::desynchronize();
 514   }
 515 #endif // INCLUDE_ALL_GCS
 516   // record this time so VMThread can keep track how much time has elapsed
 517   // since last safepoint.
 518   _end_of_last_safepoint = os::javaTimeMillis();
 519 
 520   if (event.should_commit()) {
 521     event.set_safepointId(safepoint_id);
 522     event.commit();
 523   }
 524 }
 525 
 526 bool SafepointSynchronize::is_cleanup_needed() {
 527   // Need a safepoint if some inline cache buffers is non-empty
 528   if (!InlineCacheBuffer::is_empty()) return true;
 529   return false;
 530 }
 531 
 532 
 533 
  534 // Various cleaning tasks that should be done periodically at safepoints
  535 void SafepointSynchronize::do_cleanup_tasks() {
      // Each task runs inside its own block scope so its TraceTime timer is
      // destroyed (reporting elapsed time when TraceSafepointCleanupTime is
      // enabled) as soon as the task finishes.  Each scope also emits one
      // EventSafepointCleanupTask, tagged with the current safepoint counter
      // and the task name, when that event type is enabled for commit.
  536   {
  537     const char* name = "deflating idle monitors";
  538     EventSafepointCleanupTask event;
  539     TraceTime t1(name, TraceSafepointCleanupTime);
  540     ObjectSynchronizer::deflate_idle_monitors();
  541     if (event.should_commit()) {
  542       event.set_safepointId(safepoint_counter());
  543       event.set_name(name);
  544       event.commit();
  545     }
  546   }
  547 
  548   {
  549     const char* name = "updating inline caches";
  550     EventSafepointCleanupTask event;
  551     TraceTime t2(name, TraceSafepointCleanupTime);
  552     InlineCacheBuffer::update_inline_caches();
  553     if (event.should_commit()) {
  554       event.set_safepointId(safepoint_counter());
  555       event.set_name(name);
  556       event.commit();
  557     }
  558   }
  559   {
  560     const char* name = "compilation policy safepoint handler";
  561     EventSafepointCleanupTask event;
  562     TraceTime t3(name, TraceSafepointCleanupTime);
  563     CompilationPolicy::policy()->do_safepoint_work();
  564     if (event.should_commit()) {
  565       event.set_safepointId(safepoint_counter());
  566       event.set_name(name);
  567       event.commit();
  568     }
  569   }
  570 
  571   {
  572     const char* name = "mark nmethods";
  573     EventSafepointCleanupTask event;
  574     TraceTime t4(name, TraceSafepointCleanupTime);
  575     NMethodSweeper::mark_active_nmethods();
  576     if (event.should_commit()) {
  577       event.set_safepointId(safepoint_counter());
  578       event.set_name(name);
  579       event.commit();
  580     }
  581   }
  582 
      // The two rehashing tasks are conditional: they only run when the table
      // itself reports that rehashing is required.
  583   if (SymbolTable::needs_rehashing()) {
  584     const char* name = "rehashing symbol table";
  585     EventSafepointCleanupTask event;
  586     TraceTime t5(name, TraceSafepointCleanupTime);
  587     SymbolTable::rehash_table();
  588     if (event.should_commit()) {
  589       event.set_safepointId(safepoint_counter());
  590       event.set_name(name);
  591       event.commit();
  592     }
  593   }
  594 
  595   if (StringTable::needs_rehashing()) {
  596     const char* name = "rehashing string table";
  597     EventSafepointCleanupTask event;
  598     TraceTime t6(name, TraceSafepointCleanupTime);
  599     StringTable::rehash_table();
  600     if (event.should_commit()) {
  601       event.set_safepointId(safepoint_counter());
  602       event.set_name(name);
  603       event.commit();
  604     }
  605   }
  606 
  607   {
  608     // CMS delays purging the CLDG until the beginning of the next safepoint and to
  609     // make sure concurrent sweep is done
  610     const char* name = "purging class loader data graph";
  611     EventSafepointCleanupTask event;
  612     TraceTime t7(name, TraceSafepointCleanupTime);
  613     ClassLoaderDataGraph::purge_if_needed();
  614     if (event.should_commit()) {
  615       event.set_safepointId(safepoint_counter());
  616       event.set_name(name);
  617       event.commit();
  618     }
  619   }
  620 }
 621 
 622 
  623 bool SafepointSynchronize::safepoint_safe(JavaThread *thread, JavaThreadState state) {
      // Returns true if the given thread, in the given state, is already
      // safepoint-safe without being stopped: its state guarantees it is not
      // executing Java code or mutating VM-visible state.
  624   switch(state) {
  625   case _thread_in_native:
  626     // native threads are safe if they have no java stack or have walkable stack
  627     return !thread->has_last_Java_frame() || thread->frame_anchor()->walkable();
  628 
  629    // blocked threads should already have a walkable stack
  630   case _thread_blocked:
  631     assert(!thread->has_last_Java_frame() || thread->frame_anchor()->walkable(), "blocked and not walkable");
  632     return true;
  633 
  634   default:
      // Any other state (in Java, in VM, or transitioning) must be explicitly
      // brought to the safepoint.
  635     return false;
  636   }
  637 }
 638 


< prev index next >