1 /*
   2  * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "logging/logStream.hpp"
  27 #include "memory/allocation.inline.hpp"
  28 #include "runtime/thread.inline.hpp"
  29 #include "runtime/threadSMR.inline.hpp"
  30 #include "services/threadService.hpp"
  31 #include "utilities/globalDefinitions.hpp"
  32 #include "utilities/resourceHash.hpp"
  33 
Monitor*              ThreadsSMRSupport::_smr_delete_lock =
                          new Monitor(Monitor::special, "smr_delete_lock",
                                      false /* allow_vm_block */,
                                      Monitor::_safepoint_check_never);
// The '_cnt', '_max' and '_times' fields are enabled via
// -XX:+EnableThreadSMRStatistics:

// # of parallel threads in _smr_delete_lock->wait().
// Impl note: Hard to imagine > 64K waiting threads so this could be 16-bit,
// but there is no nice 16-bit _FORMAT support.
uint                  ThreadsSMRSupport::_smr_delete_lock_wait_cnt = 0;

// Max # of parallel threads in _smr_delete_lock->wait().
// Impl note: See _smr_delete_lock_wait_cnt note.
uint                  ThreadsSMRSupport::_smr_delete_lock_wait_max = 0;

// Flag to indicate when an _smr_delete_lock->notify() is needed.
// Impl note: See _smr_delete_lock_wait_cnt note.
volatile uint         ThreadsSMRSupport::_smr_delete_notify = 0;

// # of threads deleted over VM lifetime.
// Impl note: Atomically incremented over VM lifetime so use unsigned for more
// range. Unsigned 64-bit would be more future proof, but 64-bit atomic inc
// isn't available everywhere (or is it?).
volatile uint         ThreadsSMRSupport::_smr_deleted_thread_cnt = 0;

// Max time in millis to delete a thread.
// Impl note: 16-bit might be too small on an overloaded machine. Use
// unsigned since this is a time value. Set via Atomic::cmpxchg() in a
// loop for correctness.
volatile uint         ThreadsSMRSupport::_smr_deleted_thread_time_max = 0;

// Cumulative time in millis to delete threads.
// Impl note: Atomically added to over VM lifetime so use unsigned for more
// range. Unsigned 64-bit would be more future proof, but 64-bit atomic inc
// isn't available everywhere (or is it?).
volatile uint         ThreadsSMRSupport::_smr_deleted_thread_times = 0;

// The current system-wide list of all JavaThreads; starts out empty.
ThreadsList* volatile ThreadsSMRSupport::_smr_java_thread_list = new ThreadsList(0);

// # of ThreadsLists allocated over VM lifetime.
// Impl note: We allocate a new ThreadsList for every thread create and
// every thread delete so we need a bigger type than the
// _smr_deleted_thread_cnt field.
uint64_t              ThreadsSMRSupport::_smr_java_thread_list_alloc_cnt = 1;

// # of ThreadsLists freed over VM lifetime.
// Impl note: See _smr_java_thread_list_alloc_cnt note.
uint64_t              ThreadsSMRSupport::_smr_java_thread_list_free_cnt = 0;

// Max size ThreadsList allocated.
// Impl note: Max # of threads alive at one time should fit in unsigned 32-bit.
uint                  ThreadsSMRSupport::_smr_java_thread_list_max = 0;

// Max # of nested ThreadsLists for a thread.
// Impl note: Hard to imagine > 64K nested ThreadsLists so this could be
// 16-bit, but there is no nice 16-bit _FORMAT support.
uint                  ThreadsSMRSupport::_smr_nested_thread_list_max = 0;

// # of ThreadsListHandles deleted over VM lifetime.
// Impl note: Atomically incremented over VM lifetime so use unsigned for
// more range. There will be fewer ThreadsListHandles than threads so
// unsigned 32-bit should be fine.
volatile uint         ThreadsSMRSupport::_smr_tlh_cnt = 0;

// Max time in millis to delete a ThreadsListHandle.
// Impl note: 16-bit might be too small on an overloaded machine. Use
// unsigned since this is a time value. Set via Atomic::cmpxchg() in a
// loop for correctness.
volatile uint         ThreadsSMRSupport::_smr_tlh_time_max = 0;

// Cumulative time in millis to delete ThreadsListHandles.
// Impl note: Atomically added to over VM lifetime so use unsigned for more
// range. Unsigned 64-bit would be more future proof, but 64-bit atomic inc
// isn't available everywhere (or is it?).
volatile uint         ThreadsSMRSupport::_smr_tlh_times = 0;

// Head of the list of ThreadsLists that are waiting to be freed.
ThreadsList*          ThreadsSMRSupport::_smr_to_delete_list = NULL;

// # of parallel ThreadsLists on the to-delete list.
// Impl note: Hard to imagine > 64K ThreadsLists needing to be deleted so
// this could be 16-bit, but there is no nice 16-bit _FORMAT support.
uint                  ThreadsSMRSupport::_smr_to_delete_list_cnt = 0;

// Max # of parallel ThreadsLists on the to-delete list.
// Impl note: See _smr_to_delete_list_cnt note.
uint                  ThreadsSMRSupport::_smr_to_delete_list_max = 0;
 121 
 122 
 123 // 'inline' functions first so the definitions are before first use:
 124 
// Atomically accumulate 'add_value' (millis) into the cumulative
// thread-deletion time statistic.
inline void ThreadsSMRSupport::add_smr_deleted_thread_times(uint add_value) {
  Atomic::add(add_value, &_smr_deleted_thread_times);
}
 128 
// Atomically bump the count of threads deleted over the VM lifetime.
inline void ThreadsSMRSupport::inc_smr_deleted_thread_cnt() {
  Atomic::inc(&_smr_deleted_thread_cnt);
}
 132 
 133 inline void ThreadsSMRSupport::update_smr_deleted_thread_time_max(uint new_value) {
 134   while (true) {
 135     uint cur_value = _smr_deleted_thread_time_max;
 136     if (new_value <= cur_value) {
 137       // No need to update max value so we're done.
 138       break;
 139     }
 140     if (Atomic::cmpxchg(new_value, &_smr_deleted_thread_time_max, cur_value) == cur_value) {
 141       // Updated max value so we're done. Otherwise try it all again.
 142       break;
 143     }
 144   }
 145 }
 146 
 147 
 148 // Hash table of pointers found by a scan. Used for collecting hazard
 149 // pointers (ThreadsList references). Also used for collecting JavaThreads
 150 // that are indirectly referenced by hazard ptrs. An instance of this
 151 // class only contains one type of pointer.
 152 //
 153 class ThreadScanHashtable : public CHeapObj<mtThread> {
 154  private:
 155   static bool ptr_equals(void * const& s1, void * const& s2) {
 156     return s1 == s2;
 157   }
 158 
 159   static unsigned int ptr_hash(void * const& s1) {
 160     // 2654435761 = 2^32 * Phi (golden ratio)
 161     return (unsigned int)(((uint32_t)(uintptr_t)s1) * 2654435761u);
 162   }
 163 
 164   int _table_size;
 165   // ResourceHashtable SIZE is specified at compile time so our
 166   // dynamic _table_size is unused for now; 1031 is the first prime
 167   // after 1024.
 168   typedef ResourceHashtable<void *, int, &ThreadScanHashtable::ptr_hash,
 169                             &ThreadScanHashtable::ptr_equals, 1031,
 170                             ResourceObj::C_HEAP, mtThread> PtrTable;
 171   PtrTable * _ptrs;
 172 
 173  public:
 174   // ResourceHashtable is passed to various functions and populated in
 175   // different places so we allocate it using C_HEAP to make it immune
 176   // from any ResourceMarks that happen to be in the code paths.
 177   ThreadScanHashtable(int table_size) : _table_size(table_size), _ptrs(new (ResourceObj::C_HEAP, mtThread) PtrTable()) {}
 178 
 179   ~ThreadScanHashtable() { delete _ptrs; }
 180 
 181   bool has_entry(void *pointer) {
 182     int *val_ptr = _ptrs->get(pointer);
 183     return val_ptr != NULL && *val_ptr == 1;
 184   }
 185 
 186   void add_entry(void *pointer) {
 187     _ptrs->put(pointer, 1);
 188   }
 189 };
 190 
 191 // Closure to gather JavaThreads indirectly referenced by hazard ptrs
 192 // (ThreadsList references) into a hash table. This closure handles part 2
 193 // of the dance - adding all the JavaThreads referenced by the hazard
 194 // pointer (ThreadsList reference) to the hash table.
 195 //
 196 class AddThreadHazardPointerThreadClosure : public ThreadClosure {
 197  private:
 198   ThreadScanHashtable *_table;
 199 
 200  public:
 201   AddThreadHazardPointerThreadClosure(ThreadScanHashtable *table) : _table(table) {}
 202 
 203   virtual void do_thread(Thread *thread) {
 204     if (!_table->has_entry((void*)thread)) {
 205       // The same JavaThread might be on more than one ThreadsList or
 206       // more than one thread might be using the same ThreadsList. In
 207       // either case, we only need a single entry for a JavaThread.
 208       _table->add_entry((void*)thread);
 209     }
 210   }
 211 };
 212 
// Closure to gather JavaThreads indirectly referenced by hazard ptrs
// (ThreadsList references) into a hash table. This closure handles part 1
// of the dance - hazard ptr chain walking and dispatch to another
// closure.
//
class ScanHazardPtrGatherProtectedThreadsClosure : public ThreadClosure {
 private:
  ThreadScanHashtable *_table;  // receives the protected JavaThreads
 public:
  ScanHazardPtrGatherProtectedThreadsClosure(ThreadScanHashtable *table) : _table(table) {}

  // Examine 'thread's hazard ptr; if it refers to a stable ThreadsList,
  // add every JavaThread reachable from that list (and from any nested
  // ThreadsLists) to the hash table.
  virtual void do_thread(Thread *thread) {
    assert_locked_or_safepoint(Threads_lock);

    if (thread == NULL) return;

    // This code races with ThreadsSMRSupport::acquire_stable_list() which
    // is lock-free so we have to handle some special situations.
    //
    ThreadsList *current_list = NULL;
    while (true) {
      current_list = thread->get_threads_hazard_ptr();
      // No hazard ptr so nothing more to do.
      if (current_list == NULL) {
        assert(thread->get_nested_threads_hazard_ptr() == NULL,
               "cannot have a nested hazard ptr with a NULL regular hazard ptr");
        return;
      }

      // If the hazard ptr is verified as stable (since it is not tagged),
      // then it is safe to use.
      if (!Thread::is_hazard_ptr_tagged(current_list)) break;

      // The hazard ptr is tagged as not yet verified as being stable
      // so we are racing with acquire_stable_list(). This exchange
      // attempts to invalidate the hazard ptr. If we win the race,
      // then we can ignore this unstable hazard ptr and the other
      // thread will retry the attempt to publish a stable hazard ptr.
      // If we lose the race, then we retry our attempt to look at the
      // hazard ptr.
      if (thread->cmpxchg_threads_hazard_ptr(NULL, current_list) == current_list) return;
    }

    // The current JavaThread has a hazard ptr (ThreadsList reference)
    // which might be _smr_java_thread_list or it might be an older
    // ThreadsList that has been removed but not freed. In either case,
    // the hazard ptr is protecting all the JavaThreads on that
    // ThreadsList.
    AddThreadHazardPointerThreadClosure add_cl(_table);
    current_list->threads_do(&add_cl);

    // Any NestedThreadsLists are also protecting JavaThreads so
    // gather those also; the ThreadsLists may be different.
    for (NestedThreadsList* node = thread->get_nested_threads_hazard_ptr();
         node != NULL; node = node->next()) {
      node->t_list()->threads_do(&add_cl);
    }
  }
};
 272 
 273 // Closure to gather hazard ptrs (ThreadsList references) into a hash table.
 274 //
 275 class ScanHazardPtrGatherThreadsListClosure : public ThreadClosure {
 276  private:
 277   ThreadScanHashtable *_table;
 278  public:
 279   ScanHazardPtrGatherThreadsListClosure(ThreadScanHashtable *table) : _table(table) {}
 280 
 281   virtual void do_thread(Thread* thread) {
 282     assert_locked_or_safepoint(Threads_lock);
 283 
 284     if (thread == NULL) return;
 285     ThreadsList *threads = thread->get_threads_hazard_ptr();
 286     if (threads == NULL) {
 287       assert(thread->get_nested_threads_hazard_ptr() == NULL,
 288              "cannot have a nested hazard ptr with a NULL regular hazard ptr");
 289       return;
 290     }
 291     // In this closure we always ignore the tag that might mark this
 292     // hazard ptr as not yet verified. If we happen to catch an
 293     // unverified hazard ptr that is subsequently discarded (not
 294     // published), then the only side effect is that we might keep a
 295     // to-be-deleted ThreadsList alive a little longer.
 296     threads = Thread::untag_hazard_ptr(threads);
 297     if (!_table->has_entry((void*)threads)) {
 298       _table->add_entry((void*)threads);
 299     }
 300 
 301     // Any NestedThreadsLists are also protecting JavaThreads so
 302     // gather those also; the ThreadsLists may be different.
 303     for (NestedThreadsList* node = thread->get_nested_threads_hazard_ptr();
 304          node != NULL; node = node->next()) {
 305       threads = node->t_list();
 306       if (!_table->has_entry((void*)threads)) {
 307         _table->add_entry((void*)threads);
 308       }
 309     }
 310   }
 311 };
 312 
 313 // Closure to print JavaThreads that have a hazard ptr (ThreadsList
 314 // reference) that contains an indirect reference to a specific JavaThread.
 315 //
 316 class ScanHazardPtrPrintMatchingThreadsClosure : public ThreadClosure {
 317  private:
 318   JavaThread *_thread;
 319  public:
 320   ScanHazardPtrPrintMatchingThreadsClosure(JavaThread *thread) : _thread(thread) {}
 321 
 322   virtual void do_thread(Thread *thread) {
 323     assert_locked_or_safepoint(Threads_lock);
 324 
 325     if (thread == NULL) return;
 326     ThreadsList *current_list = thread->get_threads_hazard_ptr();
 327     if (current_list == NULL) {
 328       assert(thread->get_nested_threads_hazard_ptr() == NULL,
 329              "cannot have a nested hazard ptr with a NULL regular hazard ptr");
 330       return;
 331     }
 332     // If the hazard ptr is unverified, then ignore it.
 333     if (Thread::is_hazard_ptr_tagged(current_list)) return;
 334 
 335     // The current JavaThread has a hazard ptr (ThreadsList reference)
 336     // which might be _smr_java_thread_list or it might be an older
 337     // ThreadsList that has been removed but not freed. In either case,
 338     // the hazard ptr is protecting all the JavaThreads on that
 339     // ThreadsList, but we only care about matching a specific JavaThread.
 340     JavaThreadIterator jti(current_list);
 341     for (JavaThread *p = jti.first(); p != NULL; p = jti.next()) {
 342       if (p == _thread) {
 343         log_debug(thread, smr)("tid=" UINTX_FORMAT ": ThreadsSMRSupport::smr_delete: thread1=" INTPTR_FORMAT " has a hazard pointer for thread2=" INTPTR_FORMAT, os::current_thread_id(), p2i(thread), p2i(_thread));
 344         break;
 345       }
 346     }
 347 
 348     // Any NestedThreadsLists are also protecting JavaThreads so
 349     // check those also; the ThreadsLists may be different.
 350     for (NestedThreadsList* node = thread->get_nested_threads_hazard_ptr();
 351          node != NULL; node = node->next()) {
 352       JavaThreadIterator jti(node->t_list());
 353       for (JavaThread *p = jti.first(); p != NULL; p = jti.next()) {
 354         if (p == _thread) {
 355           log_debug(thread, smr)("tid=" UINTX_FORMAT ": ThreadsSMRSupport::smr_delete: thread1=" INTPTR_FORMAT " has a nested hazard pointer for thread2=" INTPTR_FORMAT, os::current_thread_id(), p2i(thread), p2i(_thread));
 356           return;
 357         }
 358       }
 359     }
 360   }
 361 };
 362 
 363 
 364 // 'entries + 1' so we always have at least one entry.
 365 ThreadsList::ThreadsList(int entries) : _length(entries), _threads(NEW_C_HEAP_ARRAY(JavaThread*, entries + 1, mtThread)), _next_list(NULL) {
 366   *(JavaThread**)(_threads + entries) = NULL;  // Make sure the extra entry is NULL.
 367 }
 368 
// Free the C-heap backing array; the JavaThreads themselves are not owned.
ThreadsList::~ThreadsList() {
  FREE_C_HEAP_ARRAY(JavaThread*, _threads);
}
 372 
 373 // Remove a JavaThread from a ThreadsList. The returned ThreadsList is a
 374 // new copy of the specified ThreadsList with the specified JavaThread
 375 // removed.
 376 ThreadsList *ThreadsList::remove_thread(ThreadsList* list, JavaThread* java_thread) {
 377   assert(list->_length > 0, "sanity");
 378 
 379   uint i = (uint)list->find_index_of_JavaThread(java_thread);
 380   assert(i < list->_length, "did not find JavaThread on the list");
 381   const uint index = i;
 382   const uint new_length = list->_length - 1;
 383   const uint head_length = index;
 384   const uint tail_length = (new_length >= index) ? (new_length - index) : 0;
 385   ThreadsList *const new_list = new ThreadsList(new_length);
 386 
 387   if (head_length > 0) {
 388     Copy::disjoint_words((HeapWord*)list->_threads, (HeapWord*)new_list->_threads, head_length);
 389   }
 390   if (tail_length > 0) {
 391     Copy::disjoint_words((HeapWord*)list->_threads + index + 1, (HeapWord*)new_list->_threads + index, tail_length);
 392   }
 393 
 394   return new_list;
 395 }
 396 
 397 // Add a JavaThread to a ThreadsList. The returned ThreadsList is a
 398 // new copy of the specified ThreadsList with the specified JavaThread
 399 // appended to the end.
 400 ThreadsList *ThreadsList::add_thread(ThreadsList *list, JavaThread *java_thread) {
 401   const uint index = list->_length;
 402   const uint new_length = index + 1;
 403   const uint head_length = index;
 404   ThreadsList *const new_list = new ThreadsList(new_length);
 405 
 406   if (head_length > 0) {
 407     Copy::disjoint_words((HeapWord*)list->_threads, (HeapWord*)new_list->_threads, head_length);
 408   }
 409   *(JavaThread**)(new_list->_threads + index) = java_thread;
 410 
 411   return new_list;
 412 }
 413 
 414 int ThreadsList::find_index_of_JavaThread(JavaThread *target) {
 415   if (target == NULL) {
 416     return -1;
 417   }
 418   for (uint i = 0; i < length(); i++) {
 419     if (target == thread_at(i)) {
 420       return (int)i;
 421     }
 422   }
 423   return -1;
 424 }
 425 
 426 JavaThread* ThreadsList::find_JavaThread_from_java_tid(jlong java_tid) const {
 427   for (uint i = 0; i < length(); i++) {
 428     JavaThread* thread = thread_at(i);
 429     oop tobj = thread->threadObj();
 430     // Ignore the thread if it hasn't run yet, has exited
 431     // or is starting to exit.
 432     if (tobj != NULL && !thread->is_exiting() &&
 433         java_tid == java_lang_Thread::thread_id(tobj)) {
 434       // found a match
 435       return thread;
 436     }
 437   }
 438   return NULL;
 439 }
 440 
 441 bool ThreadsList::includes(const JavaThread * const p) const {
 442   if (p == NULL) {
 443     return false;
 444   }
 445   for (uint i = 0; i < length(); i++) {
 446     if (thread_at(i) == p) {
 447       return true;
 448     }
 449   }
 450   return false;
 451 }
 452 
 453 ThreadsListSetter::~ThreadsListSetter() {
 454   if (_target_needs_release) {
 455     // The hazard ptr in the target needs to be released.
 456     ThreadsSMRSupport::release_stable_list(_target);
 457   }
 458 }
 459 
// Install a stable ThreadsList as the target thread's hazard ptr and
// remember that the destructor must release it.
void ThreadsListSetter::set() {
  assert(_target->get_threads_hazard_ptr() == NULL, "hazard ptr should not already be set");
  (void) ThreadsSMRSupport::acquire_stable_list(_target, /* is_ThreadsListSetter */ true);
  _target_needs_release = true;
}
 465 
// Acquire a stable ThreadsList for 'self' (which must be the current
// thread) and optionally start the statistics timer.
ThreadsListHandle::ThreadsListHandle(Thread *self) : _list(ThreadsSMRSupport::acquire_stable_list(self, /* is_ThreadsListSetter */ false)), _self(self) {
  assert(self == Thread::current(), "sanity check");
  if (EnableThreadSMRStatistics) {
    _timer.start();
  }
}
 472 
// Release the hazard ptr and, when statistics are enabled, record this
// ThreadsListHandle's lifetime (count, cumulative time and max time).
ThreadsListHandle::~ThreadsListHandle() {
  // Release first so the stats reflect the full protected interval.
  ThreadsSMRSupport::release_stable_list(_self);
  if (EnableThreadSMRStatistics) {
    _timer.stop();
    uint millis = (uint)_timer.milliseconds();
    ThreadsSMRSupport::inc_smr_tlh_cnt();
    ThreadsSMRSupport::add_smr_tlh_times(millis);
    ThreadsSMRSupport::update_smr_tlh_time_max(millis);
  }
}
 483 
 484 // Convert an internal thread reference to a JavaThread found on the
 485 // associated ThreadsList. This ThreadsListHandle "protects" the
 486 // returned JavaThread *.
 487 //
 488 // If thread_oop_p is not NULL, then the caller wants to use the oop
 489 // after this call so the oop is returned. On success, *jt_pp is set
 490 // to the converted JavaThread * and true is returned. On error,
 491 // returns false.
 492 //
 493 bool ThreadsListHandle::cv_internal_thread_to_JavaThread(jobject jthread,
 494                                                          JavaThread ** jt_pp,
 495                                                          oop * thread_oop_p) {
 496   assert(this->list() != NULL, "must have a ThreadsList");
 497   assert(jt_pp != NULL, "must have a return JavaThread pointer");
 498   // thread_oop_p is optional so no assert()
 499 
 500   // The JVM_* interfaces don't allow a NULL thread parameter; JVM/TI
 501   // allows a NULL thread parameter to signify "current thread" which
 502   // allows us to avoid calling cv_external_thread_to_JavaThread().
 503   // The JVM_* interfaces have no such leeway.
 504 
 505   oop thread_oop = JNIHandles::resolve_non_null(jthread);
 506   // Looks like an oop at this point.
 507   if (thread_oop_p != NULL) {
 508     // Return the oop to the caller; the caller may still want
 509     // the oop even if this function returns false.
 510     *thread_oop_p = thread_oop;
 511   }
 512 
 513   JavaThread *java_thread = java_lang_Thread::thread(thread_oop);
 514   if (java_thread == NULL) {
 515     // The java.lang.Thread does not contain a JavaThread * so it has
 516     // not yet run or it has died.
 517     return false;
 518   }
 519   // Looks like a live JavaThread at this point.
 520 
 521   if (java_thread != JavaThread::current()) {
 522     // jthread is not for the current JavaThread so have to verify
 523     // the JavaThread * against the ThreadsList.
 524     if (EnableThreadSMRExtraValidityChecks && !includes(java_thread)) {
 525       // Not on the JavaThreads list so it is not alive.
 526       return false;
 527     }
 528   }
 529 
 530   // Return a live JavaThread that is "protected" by the
 531   // ThreadsListHandle in the caller.
 532   *jt_pp = java_thread;
 533   return true;
 534 }
 535 
 536 // Acquire a stable ThreadsList.
 537 //
 538 ThreadsList *ThreadsSMRSupport::acquire_stable_list(Thread *self, bool is_ThreadsListSetter) {
 539   assert(self != NULL, "sanity check");
 540   // acquire_stable_list_nested_path() will grab the Threads_lock
 541   // so let's make sure the ThreadsListHandle is in a safe place.
 542   // ThreadsListSetter cannot make this check on this code path.
 543   debug_only(if (!is_ThreadsListSetter && StrictSafepointChecks) self->check_for_valid_safepoint_state(/* potential_vm_operation */ false);)
 544 
 545   if (self->get_threads_hazard_ptr() == NULL) {
 546     // The typical case is first.
 547     return acquire_stable_list_fast_path(self);
 548   }
 549 
 550   // The nested case is rare.
 551   return acquire_stable_list_nested_path(self);
 552 }
 553 
// Fast path (and lock free) way to acquire a stable ThreadsList.
//
ThreadsList *ThreadsSMRSupport::acquire_stable_list_fast_path(Thread *self) {
  assert(self != NULL, "sanity check");
  assert(self->get_threads_hazard_ptr() == NULL, "sanity check");
  assert(self->get_nested_threads_hazard_ptr() == NULL,
         "cannot have a nested hazard ptr with a NULL regular hazard ptr");

  ThreadsList* threads;

  // Stable recording of a hazard ptr for SMR. This code does not use
  // locks so its use of the _smr_java_thread_list & _threads_hazard_ptr
  // fields is racy relative to code that uses those fields with locks.
  // OrderAccess and Atomic functions are used to deal with those races.
  //
  while (true) {
    threads = get_smr_java_thread_list();

    // Publish a tagged hazard ptr to denote that the hazard ptr is not
    // yet verified as being stable. Due to the fence after the hazard
    // ptr write, it will be sequentially consistent w.r.t. the
    // sequentially consistent writes of the ThreadsList, even on
    // non-multiple copy atomic machines where stores can be observed
    // in different order from different observer threads.
    ThreadsList* unverified_threads = Thread::tag_hazard_ptr(threads);
    self->set_threads_hazard_ptr(unverified_threads);

    // If _smr_java_thread_list has changed, we have lost a race with
    // Threads::add() or Threads::remove() and have to try again.
    if (get_smr_java_thread_list() != threads) {
      continue;
    }

    // We try to remove the tag which will verify the hazard ptr as
    // being stable. This exchange can race with a scanning thread
    // which might invalidate the tagged hazard ptr to keep it from
    // being followed to access JavaThread ptrs. If we lose the race,
    // we simply retry. If we win the race, then the stable hazard
    // ptr is officially published.
    if (self->cmpxchg_threads_hazard_ptr(threads, unverified_threads) == unverified_threads) {
      break;
    }
  }

  // A stable hazard ptr has been published letting other threads know
  // that the ThreadsList and the JavaThreads reachable from this list
  // are protected and hence they should not be deleted until everyone
  // agrees it is safe to do so.

  return threads;
}
 605 
// Acquire a nested stable ThreadsList; this is rare so it uses
// Threads_lock.
//
ThreadsList *ThreadsSMRSupport::acquire_stable_list_nested_path(Thread *self) {
  assert(self != NULL, "sanity check");
  assert(self->get_threads_hazard_ptr() != NULL,
         "cannot have a NULL regular hazard ptr when acquiring a nested hazard ptr");

  // The thread already has a hazard ptr (ThreadsList ref) so we need
  // to create a nested ThreadsListHandle with the current ThreadsList
  // since it might be different than our current hazard ptr. The need
  // for a nested ThreadsListHandle is rare so we do this while holding
  // the Threads_lock so we don't race with the scanning code; the code
  // is so much simpler this way.

  NestedThreadsList* node;
  {
    // Only grab the Threads_lock if we don't already own it.
    MutexLockerEx ml(Threads_lock->owned_by_self() ? NULL : Threads_lock);
    node = new NestedThreadsList(get_smr_java_thread_list());
    // We insert at the front of the list to match up with the delete
    // in release_stable_list().
    node->set_next(self->get_nested_threads_hazard_ptr());
    self->set_nested_threads_hazard_ptr(node);
    if (EnableThreadSMRStatistics) {
      // Stats are updated while still holding the lock so the max
      // update does not need an atomic protocol.
      self->inc_nested_threads_hazard_ptr_cnt();
      if (self->nested_threads_hazard_ptr_cnt() > _smr_nested_thread_list_max) {
        _smr_nested_thread_list_max = self->nested_threads_hazard_ptr_cnt();
      }
    }
  }
  log_debug(thread, smr)("tid=" UINTX_FORMAT ": ThreadsSMRSupport::acquire_stable_list: add NestedThreadsList node containing ThreadsList=" INTPTR_FORMAT, os::current_thread_id(), p2i(node->t_list()));

  return node->t_list();
}
 641 
// set_smr_delete_notify() and clear_smr_delete_notify() are called
// under the protection of the smr_delete_lock, but we also use an
// Atomic operation to ensure the memory update is seen earlier than
// when the smr_delete_lock is dropped.
//
void ThreadsSMRSupport::clear_smr_delete_notify() {
  Atomic::dec(&_smr_delete_notify);
}
 650 
 651 // Return true if the specified JavaThread is protected by a hazard
 652 // pointer (ThreadsList reference). Otherwise, returns false.
 653 //
 654 bool ThreadsSMRSupport::is_a_protected_JavaThread(JavaThread *thread) {
 655   assert_locked_or_safepoint(Threads_lock);
 656 
 657   // Hash table size should be first power of two higher than twice
 658   // the length of the Threads list.
 659   int hash_table_size = MIN2((int)get_smr_java_thread_list()->length(), 32) << 1;
 660   hash_table_size--;
 661   hash_table_size |= hash_table_size >> 1;
 662   hash_table_size |= hash_table_size >> 2;
 663   hash_table_size |= hash_table_size >> 4;
 664   hash_table_size |= hash_table_size >> 8;
 665   hash_table_size |= hash_table_size >> 16;
 666   hash_table_size++;
 667 
 668   // Gather a hash table of the JavaThreads indirectly referenced by
 669   // hazard ptrs.
 670   ThreadScanHashtable *scan_table = new ThreadScanHashtable(hash_table_size);
 671   ScanHazardPtrGatherProtectedThreadsClosure scan_cl(scan_table);
 672   Threads::threads_do(&scan_cl);
 673 
 674   bool thread_is_protected = false;
 675   if (scan_table->has_entry((void*)thread)) {
 676     thread_is_protected = true;
 677   }
 678   delete scan_table;
 679   return thread_is_protected;
 680 }
 681 
 682 // Release a stable ThreadsList.
 683 //
 684 void ThreadsSMRSupport::release_stable_list(Thread *self) {
 685   assert(self != NULL, "sanity check");
 686   // release_stable_list_nested_path() will grab the Threads_lock
 687   // so let's make sure the ThreadsListHandle is in a safe place.
 688   debug_only(if (StrictSafepointChecks) self->check_for_valid_safepoint_state(/* potential_vm_operation */ false);)
 689 
 690   if (self->get_nested_threads_hazard_ptr() == NULL) {
 691     // The typical case is first.
 692     release_stable_list_fast_path(self);
 693     return;
 694   }
 695 
 696   // The nested case is rare.
 697   release_stable_list_nested_path(self);
 698 }
 699 
// Fast path way to release a stable ThreadsList. The release portion
// is lock-free, but the wake up portion is not.
//
void ThreadsSMRSupport::release_stable_list_fast_path(Thread *self) {
  assert(self != NULL, "sanity check");
  assert(self->get_threads_hazard_ptr() != NULL, "sanity check");
  assert(self->get_nested_threads_hazard_ptr() == NULL,
         "cannot have a nested hazard ptr when releasing a regular hazard ptr");

  // After releasing the hazard ptr, other threads may go ahead and
  // free up some memory temporarily used by a ThreadsList snapshot.
  self->set_threads_hazard_ptr(NULL);

  // We use double-check locking to reduce traffic on the system
  // wide smr_delete_lock. The hazard ptr is cleared above, before
  // this check, so a waiter cannot miss our release.
  if (ThreadsSMRSupport::smr_delete_notify()) {
    // An exiting thread might be waiting in smr_delete(); we need to
    // check with smr_delete_lock to be sure.
    release_stable_list_wake_up((char *) "regular hazard ptr");
  }
}
 721 
// Release a nested stable ThreadsList; this is rare so it uses
// Threads_lock.
//
void ThreadsSMRSupport::release_stable_list_nested_path(Thread *self) {
  assert(self != NULL, "sanity check");
  assert(self->get_nested_threads_hazard_ptr() != NULL, "sanity check");
  assert(self->get_threads_hazard_ptr() != NULL,
         "must have a regular hazard ptr to have nested hazard ptrs");

  // We have a nested ThreadsListHandle so we have to release it first.
  // The need for a nested ThreadsListHandle is rare so we do this while
  // holding the Threads_lock so we don't race with the scanning code;
  // the code is so much simpler this way.

  NestedThreadsList *node;
  {
    // Only grab the Threads_lock if we don't already own it.
    MutexLockerEx ml(Threads_lock->owned_by_self() ? NULL : Threads_lock);
    // We remove from the front of the list to match up with the insert
    // in acquire_stable_list().
    node = self->get_nested_threads_hazard_ptr();
    self->set_nested_threads_hazard_ptr(node->next());
    if (EnableThreadSMRStatistics) {
      // Statistics only; the count mirrors the nested list length.
      self->dec_nested_threads_hazard_ptr_cnt();
    }
  }

  // An exiting thread might be waiting in smr_delete(); we need to
  // check with smr_delete_lock to be sure.
  release_stable_list_wake_up((char *) "nested hazard ptr");

  log_debug(thread, smr)("tid=" UINTX_FORMAT ": ThreadsSMRSupport::release_stable_list: delete NestedThreadsList node containing ThreadsList=" INTPTR_FORMAT, os::current_thread_id(), p2i(node->t_list()));

  // The node was unlinked from 'self' while holding the Threads_lock
  // above, so it can be deleted here without holding any lock.
  delete node;
}
 757 
// Wake up portion of the release stable ThreadsList protocol;
// uses the smr_delete_lock().
//
// 'log_str' describes which kind of hazard ptr ("regular" or "nested")
// was just released and is used only for logging.
//
void ThreadsSMRSupport::release_stable_list_wake_up(char *log_str) {
  assert(log_str != NULL, "sanity check");

  // Note: smr_delete_lock is held in smr_delete() for the entire
  // hazard ptr search so that we do not lose this notify() if
  // the exiting thread has to wait. That code path also holds
  // Threads_lock (which was grabbed before smr_delete_lock) so that
  // threads_do() can be called. This means the system can't start a
  // safepoint which means this thread can't take too long to get to
  // a safepoint because of being blocked on smr_delete_lock.
  //
  MonitorLockerEx ml(ThreadsSMRSupport::smr_delete_lock(), Monitor::_no_safepoint_check_flag);
  if (ThreadsSMRSupport::smr_delete_notify()) {
    // Notify any exiting JavaThreads that are waiting in smr_delete()
    // that we've released a ThreadsList.
    ml.notify_all();
    log_debug(thread, smr)("tid=" UINTX_FORMAT ": ThreadsSMRSupport::release_stable_list notified %s", os::current_thread_id(), log_str);
  }
}
 780 
// See note for clear_smr_delete_notify().
//
// Bumps the _smr_delete_notify counter; a non-zero value tells hazard
// ptr releasers (see release_stable_list_fast_path()) that an exiting
// JavaThread may be waiting in smr_delete() so the smr_delete_lock
// protocol must be followed. Called with smr_delete_lock held
// (see smr_delete()).
//
void ThreadsSMRSupport::set_smr_delete_notify() {
  Atomic::inc(&_smr_delete_notify);
}
 786 
// Safely delete a JavaThread when it is no longer in use by a
// ThreadsListHandle.
//
// Blocks until no hazard ptr protects 'thread' and then deletes it.
// The caller must not hold Threads_lock; 'thread' is presumed to have
// already been removed from the Threads list (see the "not on the
// Threads list" comments below) — confirm at the call sites.
//
void ThreadsSMRSupport::smr_delete(JavaThread *thread) {
  assert(!Threads_lock->owned_by_self(), "sanity");

  bool has_logged_once = false;
  elapsedTimer timer;
  if (EnableThreadSMRStatistics) {
    timer.start();
  }

  // Loop until a hazard ptr scan finds no reference to 'thread'.
  while (true) {
    {
      // No safepoint check because this JavaThread is not on the
      // Threads list.
      MutexLockerEx ml(Threads_lock, Mutex::_no_safepoint_check_flag);
      // Cannot use a MonitorLockerEx helper here because we have
      // to drop the Threads_lock first if we wait.
      ThreadsSMRSupport::smr_delete_lock()->lock_without_safepoint_check();
      // Set the smr_delete_notify flag after we grab smr_delete_lock
      // and before we scan hazard ptrs because we're doing
      // double-check locking in release_stable_list().
      ThreadsSMRSupport::set_smr_delete_notify();

      if (!is_a_protected_JavaThread(thread)) {
        // This is the common case.
        ThreadsSMRSupport::clear_smr_delete_notify();
        ThreadsSMRSupport::smr_delete_lock()->unlock();
        break;
      }
      if (!has_logged_once) {
        has_logged_once = true;
        log_debug(thread, smr)("tid=" UINTX_FORMAT ": ThreadsSMRSupport::smr_delete: thread=" INTPTR_FORMAT " is not deleted.", os::current_thread_id(), p2i(thread));
        if (log_is_enabled(Debug, os, thread)) {
          ScanHazardPtrPrintMatchingThreadsClosure scan_cl(thread);
          Threads::threads_do(&scan_cl);
        }
      }
    } // We have to drop the Threads_lock to wait or delete the thread

    if (EnableThreadSMRStatistics) {
      // Track the number of concurrent waiters (statistics only).
      _smr_delete_lock_wait_cnt++;
      if (_smr_delete_lock_wait_cnt > _smr_delete_lock_wait_max) {
        _smr_delete_lock_wait_max = _smr_delete_lock_wait_cnt;
      }
    }
    // Wait for a release_stable_list() call before we check again. No
    // safepoint check, no timeout, and not as suspend equivalent flag
    // because this JavaThread is not on the Threads list.
    ThreadsSMRSupport::smr_delete_lock()->wait(Mutex::_no_safepoint_check_flag, 0,
                                     !Mutex::_as_suspend_equivalent_flag);
    if (EnableThreadSMRStatistics) {
      _smr_delete_lock_wait_cnt--;
    }

    ThreadsSMRSupport::clear_smr_delete_notify();
    ThreadsSMRSupport::smr_delete_lock()->unlock();
    // Retry the whole scenario.
  }

  if (ThreadLocalHandshakes) {
    // The thread is about to be deleted so cancel any handshake.
    thread->cancel_handshake();
  }

  delete thread;
  if (EnableThreadSMRStatistics) {
    timer.stop();
    uint millis = (uint)timer.milliseconds();
    ThreadsSMRSupport::inc_smr_deleted_thread_cnt();
    ThreadsSMRSupport::add_smr_deleted_thread_times(millis);
    ThreadsSMRSupport::update_smr_deleted_thread_time_max(millis);
  }

  log_debug(thread, smr)("tid=" UINTX_FORMAT ": ThreadsSMRSupport::smr_delete: thread=" INTPTR_FORMAT " is deleted.", os::current_thread_id(), p2i(thread));
}
 864 
// Return true if an exiting JavaThread may be waiting in smr_delete();
// see set_smr_delete_notify().
bool ThreadsSMRSupport::smr_delete_notify() {
  // Use load_acquire() in order to see any updates to _smr_delete_notify
  // earlier than when smr_delete_lock is grabbed.
  return (OrderAccess::load_acquire(&_smr_delete_notify) != 0);
}
 870 
// Safely free a ThreadsList after a Threads::add() or Threads::remove().
// The specified ThreadsList may not get deleted during this call if it
// is still in-use (referenced by a hazard ptr). Other ThreadsLists
// in the chain may get deleted by this call if they are no longer in-use.
void ThreadsSMRSupport::smr_free_list(ThreadsList* threads) {
  assert_locked_or_safepoint(Threads_lock);

  // Prepend 'threads' to the to-delete list; entries stay on that list
  // until a scan (below, possibly on a later call) finds them
  // unreferenced by any hazard ptr.
  threads->set_next_list(_smr_to_delete_list);
  _smr_to_delete_list = threads;
  if (EnableThreadSMRStatistics) {
    _smr_to_delete_list_cnt++;
    if (_smr_to_delete_list_cnt > _smr_to_delete_list_max) {
      _smr_to_delete_list_max = _smr_to_delete_list_cnt;
    }
  }

  // Hash table size should be first power of two higher than twice the length of the ThreadsList
  // (capped via the MIN2() so the table never exceeds 64). The
  // decrement/or/shift sequence below is the standard bit-smearing
  // trick that rounds the value up to the next power of two.
  int hash_table_size = MIN2((int)get_smr_java_thread_list()->length(), 32) << 1;
  hash_table_size--;
  hash_table_size |= hash_table_size >> 1;
  hash_table_size |= hash_table_size >> 2;
  hash_table_size |= hash_table_size >> 4;
  hash_table_size |= hash_table_size >> 8;
  hash_table_size |= hash_table_size >> 16;
  hash_table_size++;

  // Gather a hash table of the current hazard ptrs:
  ThreadScanHashtable *scan_table = new ThreadScanHashtable(hash_table_size);
  ScanHazardPtrGatherThreadsListClosure scan_cl(scan_table);
  Threads::threads_do(&scan_cl);

  // Walk through the linked list of pending freeable ThreadsLists
  // and free the ones that are not referenced from hazard ptrs.
  ThreadsList* current = _smr_to_delete_list;
  ThreadsList* prev = NULL;
  ThreadsList* next = NULL;
  bool threads_is_freed = false;
  while (current != NULL) {
    next = current->next_list();
    if (!scan_table->has_entry((void*)current)) {
      // This ThreadsList is not referenced by a hazard ptr.
      // Unlink it: fix up the predecessor's next ptr and, if it is
      // the list head, advance the head.
      if (prev != NULL) {
        prev->set_next_list(next);
      }
      if (_smr_to_delete_list == current) {
        _smr_to_delete_list = next;
      }

      log_debug(thread, smr)("tid=" UINTX_FORMAT ": ThreadsSMRSupport::smr_free_list: threads=" INTPTR_FORMAT " is freed.", os::current_thread_id(), p2i(current));
      if (current == threads) threads_is_freed = true;
      delete current;
      if (EnableThreadSMRStatistics) {
        _smr_java_thread_list_free_cnt++;
        _smr_to_delete_list_cnt--;
      }
    } else {
      // Still referenced by at least one hazard ptr; keep it on the
      // to-delete list and advance 'prev'.
      prev = current;
    }
    current = next;
  }

  if (!threads_is_freed) {
    // Only report "is not freed" on the original call to
    // smr_free_list() for this ThreadsList.
    log_debug(thread, smr)("tid=" UINTX_FORMAT ": ThreadsSMRSupport::smr_free_list: threads=" INTPTR_FORMAT " is not freed.", os::current_thread_id(), p2i(threads));
  }

  delete scan_table;
}
 940 
 941 // Log Threads class SMR info.
 942 void ThreadsSMRSupport::log_smr_statistics() {
 943   LogTarget(Info, thread, smr) log;
 944   if (log.is_enabled()) {
 945     LogStream out(log);
 946     print_smr_info_on(&out);
 947   }
 948 }
 949 
// Print Threads class SMR info.
//
// Dumps the current ThreadsList, the pending to-delete chain, and
// (when -XX:+EnableThreadSMRStatistics) the accumulated counters.
void ThreadsSMRSupport::print_smr_info_on(outputStream* st) {
  // Only grab the Threads_lock if we don't already own it
  // and if we are not reporting an error.
  // NOTE(review): skipping the lock during error reporting presumably
  // avoids blocking the error handler on a lock that may already be
  // held — confirm; the output may then be inconsistent.
  MutexLockerEx ml((Threads_lock->owned_by_self() || VMError::is_error_reported()) ? NULL : Threads_lock);

  st->print_cr("Threads class SMR info:");
  st->print_cr("_smr_java_thread_list=" INTPTR_FORMAT ", length=%u, "
               "elements={", p2i(_smr_java_thread_list),
               _smr_java_thread_list->length());
  print_smr_info_elements_on(st, _smr_java_thread_list);
  st->print_cr("}");
  if (_smr_to_delete_list != NULL) {
    // Print the head of the to-delete chain and then each successor.
    st->print_cr("_smr_to_delete_list=" INTPTR_FORMAT ", length=%u, "
                 "elements={", p2i(_smr_to_delete_list),
                 _smr_to_delete_list->length());
    print_smr_info_elements_on(st, _smr_to_delete_list);
    st->print_cr("}");
    for (ThreadsList *t_list = _smr_to_delete_list->next_list();
         t_list != NULL; t_list = t_list->next_list()) {
      st->print("next-> " INTPTR_FORMAT ", length=%u, "
                "elements={", p2i(t_list), t_list->length());
      print_smr_info_elements_on(st, t_list);
      st->print_cr("}");
    }
  }
  // Everything below is only maintained when statistics are enabled.
  if (!EnableThreadSMRStatistics) {
    return;
  }
  st->print_cr("_smr_java_thread_list_alloc_cnt=" UINT64_FORMAT ","
               "_smr_java_thread_list_free_cnt=" UINT64_FORMAT ","
               "_smr_java_thread_list_max=%u, "
               "_smr_nested_thread_list_max=%u",
               _smr_java_thread_list_alloc_cnt,
               _smr_java_thread_list_free_cnt,
               _smr_java_thread_list_max,
               _smr_nested_thread_list_max);
  // Guard against division by zero in the average calculation.
  if (_smr_tlh_cnt > 0) {
    st->print_cr("_smr_tlh_cnt=%u"
                 ", _smr_tlh_times=%u"
                 ", avg_smr_tlh_time=%0.2f"
                 ", _smr_tlh_time_max=%u",
                 _smr_tlh_cnt, _smr_tlh_times,
                 ((double) _smr_tlh_times / _smr_tlh_cnt),
                 _smr_tlh_time_max);
  }
  // Guard against division by zero in the average calculation.
  if (_smr_deleted_thread_cnt > 0) {
    st->print_cr("_smr_deleted_thread_cnt=%u"
                 ", _smr_deleted_thread_times=%u"
                 ", avg_smr_deleted_thread_time=%0.2f"
                 ", _smr_deleted_thread_time_max=%u",
                 _smr_deleted_thread_cnt, _smr_deleted_thread_times,
                 ((double) _smr_deleted_thread_times / _smr_deleted_thread_cnt),
                 _smr_deleted_thread_time_max);
  }
  st->print_cr("_smr_delete_lock_wait_cnt=%u, _smr_delete_lock_wait_max=%u",
               _smr_delete_lock_wait_cnt, _smr_delete_lock_wait_max);
  st->print_cr("_smr_to_delete_list_cnt=%u, _smr_to_delete_list_max=%u",
               _smr_to_delete_list_cnt, _smr_to_delete_list_max);
}
1010 
1011 // Print ThreadsList elements (4 per line).
1012 void ThreadsSMRSupport::print_smr_info_elements_on(outputStream* st,
1013                                          ThreadsList* t_list) {
1014   uint cnt = 0;
1015   JavaThreadIterator jti(t_list);
1016   for (JavaThread *jt = jti.first(); jt != NULL; jt = jti.next()) {
1017     st->print(INTPTR_FORMAT, p2i(jt));
1018     if (cnt < t_list->length() - 1) {
1019       // Separate with comma or comma-space except for the last one.
1020       if (((cnt + 1) % 4) == 0) {
1021         // Four INTPTR_FORMAT fit on an 80 column line so end the
1022         // current line with just a comma.
1023         st->print_cr(",");
1024       } else {
1025         // Not the last one on the current line so use comma-space:
1026         st->print(", ");
1027       }
1028     } else {
1029       // Last one so just end the current line.
1030       st->cr();
1031     }
1032     cnt++;
1033   }
1034 }