/*
 * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "logging/log.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/mutex.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/events.hpp"
#include "utilities/macros.hpp"

#ifdef ASSERT
void Mutex::check_block_state(Thread* thread) {
  if (!_allow_vm_block && thread->is_VM_thread()) {
    // JavaThreads are checked to make sure that they do not hold _allow_vm_block locks during operations
    // that could safepoint.  Make sure the vm thread never uses locks with _allow_vm_block == false.
    fatal("VM thread could block on lock that may be held by a JavaThread during safepoint: %s", name());
  }

  assert(!os::ThreadCrashProtection::is_crash_protected(thread),
         "locking not allowed when crash protection is set");
}

void Mutex::check_safepoint_state(Thread* thread) {
  check_block_state(thread);

  // If the JavaThread checks for safepoint, verify that the lock wasn't created with safepoint_check_never.
  if (thread->is_active_Java_thread()) {
    assert(_safepoint_check_required != _safepoint_check_never,
           "This lock should %s have a safepoint check for Java threads: %s",
           _safepoint_check_required ? "always" : "never", name());

    // Also check NoSafepointVerifier, and thread state is _thread_in_vm
    thread->check_for_valid_safepoint_state();
  } else {
    // If initialized with safepoint_check_never, a NonJavaThread should never ask to safepoint check either.
    assert(_safepoint_check_required != _safepoint_check_never,
           "NonJavaThread should not check for safepoint");
  }
}

void Mutex::check_no_safepoint_state(Thread* thread) {
  check_block_state(thread);
  assert(!thread->is_active_Java_thread() || _safepoint_check_required != _safepoint_check_always,
         "This lock should %s have a safepoint check for Java threads: %s",
         _safepoint_check_required ? "always" : "never", name());
}
#endif // ASSERT

void Mutex::lock_slow(Thread* self) {
  Mutex* in_flight_mutex = NULL;
  DEBUG_ONLY(int retry_cnt = 0;)
  bool is_active_Java_thread = self->is_active_Java_thread();
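  // If ~ThreadBlockInVMWithDeadlockCheck has to release this lock on our behalf
  // (so a pending safepoint/handshake can proceed), it resets in_flight_mutex to
  // NULL; the loop below then retries the acquisition via try_lock().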
  do {
    // The lock is contended

  #ifdef ASSERT
    if (retry_cnt++ > 3) {
      log_trace(vmmutex)("JavaThread " INTPTR_FORMAT " on %d attempt trying to acquire vmmutex %s", p2i(self), retry_cnt, _name);
    }
  #endif // ASSERT

    // Is it a JavaThread participating in the safepoint protocol?
    if (is_active_Java_thread) {
      assert(rank() > Mutex::special, "Potential deadlock with special or lesser rank mutex");
      { ThreadBlockInVMWithDeadlockCheck tbivmdc((JavaThread *) self, &in_flight_mutex);
        in_flight_mutex = this;  // save for ~ThreadBlockInVMWithDeadlockCheck
        _lock.lock();
      }
      if (in_flight_mutex != NULL) {
        // Not unlocked by ~ThreadBlockInVMWithDeadlockCheck
        break;
      }
    } else {
      _lock.lock();
      break;
    }
  } while (!_lock.try_lock());

  assert_owner(NULL);
  set_owner(self);
}


void Monitor::notify() {
  assert_owner(Thread::current());
  _lock.notify();
}

void Monitor::notify_all() {
  assert_owner(Thread::current());
  _lock.notify_all();
}

#ifdef ASSERT
void Monitor::assert_wait_lock_state(Thread* self) {
  Mutex* least = get_least_ranked_lock_besides_this(self->owned_locks());
  assert(least != this, "Specification of get_least_... call above");
  if (least != NULL && least->rank() <= special) {
    ::tty->print("Attempting to wait on monitor %s/%d while holding"
               " lock %s/%d -- possible deadlock",
               name(), rank(), least->name(), least->rank());
    assert(false, "Shouldn't block(wait) while holding a lock of rank special");
  }
}
#endif // ASSERT

bool Monitor::wait_without_safepoint_check(long timeout) {
  Thread* const self = Thread::current();

  // timeout is in milliseconds - with zero meaning never timeout
  assert(timeout >= 0, "negative timeout");

  assert_owner(self);
  assert_wait_lock_state(self);

  // conceptually set the owner to NULL in anticipation of
  // abdicating the lock in wait
  set_owner(NULL);
  // Check safepoint state after resetting owner and possible NSV.
  check_no_safepoint_state(self);

  int wait_status = _lock.wait(timeout);
  set_owner(self);
  return wait_status != 0;          // return true IFF timeout
}

bool Monitor::wait(long timeout, bool as_suspend_equivalent) {
  Thread* const self = Thread::current();

  // timeout is in milliseconds - with zero meaning never timeout
  assert(timeout >= 0, "negative timeout");

  assert_owner(self);

  // Safepoint checking logically implies an active JavaThread.
  guarantee(self->is_active_Java_thread(), "invariant");
  assert_wait_lock_state(self);

  int wait_status;
  // conceptually set the owner to NULL in anticipation of
  // abdicating the lock in wait
  set_owner(NULL);
  // Check safepoint state after resetting owner and possible NSV.
  check_safepoint_state(self);
  JavaThread *jt = (JavaThread *)self;
  Mutex* in_flight_mutex = NULL;

  {
    ThreadBlockInVMWithDeadlockCheck tbivmdc(jt, &in_flight_mutex);
    OSThreadWaitState osts(self->osthread(), false /* not Object.wait() */);
    if (as_suspend_equivalent) {
      jt->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or
      // java_suspend_self()
    }

    wait_status = _lock.wait(timeout);
    in_flight_mutex = this;  // save for ~ThreadBlockInVMWithDeadlockCheck

    // were we externally suspended while we were waiting?
    if (as_suspend_equivalent && jt->handle_special_suspend_equivalent_condition()) {
      // Our event wait has finished and we own the lock, but
      // while we were waiting another thread suspended us. We don't
      // want to hold the lock while suspended because that
      // would surprise the thread that suspended us.
      _lock.unlock();
      jt->java_suspend_self();
      _lock.lock();
    }
  }

  if (in_flight_mutex != NULL) {
    // Not unlocked by ~ThreadBlockInVMWithDeadlockCheck
    assert_owner(NULL);
    // Conceptually reestablish ownership of the lock.
    set_owner(self);
  } else {
    lock(self);
  }

  return wait_status != 0;          // return true IFF timeout
}
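
// Illustrative sketch only -- Example_monitor and condition_ready below are
// hypothetical, not definitions from this file. Callers typically pair wait()
// with notify()/notify_all() under a scoped locker, roughly:
//
//   MonitorLocker ml(Example_monitor);
//   while (!condition_ready()) {
//     ml.wait();   // releases the monitor while waiting, reacquires before returning
//   }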

Mutex::~Mutex() {
  assert_owner(NULL);
}

// Only Threads_lock, Heap_lock and SR_lock may be safepoint_check_sometimes.
bool is_sometimes_ok(const char* name) {
  return (strcmp(name, "Threads_lock") == 0 || strcmp(name, "Heap_lock") == 0 || strcmp(name, "SR_lock") == 0);
}

Mutex::Mutex(int Rank, const char * name, bool allow_vm_block,
             SafepointCheckRequired safepoint_check_required) : _owner(NULL) {
  assert(os::mutex_init_done(), "Too early!");
  if (name == NULL) {
    strcpy(_name, "UNKNOWN");
  } else {
    strncpy(_name, name, MUTEX_NAME_LEN - 1);
    _name[MUTEX_NAME_LEN - 1] = '\0';
  }
#ifdef ASSERT
  _allow_vm_block  = allow_vm_block;
  _rank            = Rank;
  _safepoint_check_required = safepoint_check_required;

  assert(_safepoint_check_required != _safepoint_check_sometimes || is_sometimes_ok(name),
         "Lock has _safepoint_check_sometimes %s", name);

  assert(_rank > special || _allow_vm_block,
         "Special locks or below should allow the vm to block");
  assert(_rank > special || _safepoint_check_required == _safepoint_check_never,
         "Special locks or below should never safepoint");
#endif
}
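
// Illustrative sketch only -- Example_lock and its rank/flags are hypothetical,
// not definitions from this file. Locks are normally created once during VM
// initialization (see mutexLocker.cpp), roughly:
//
//   Mutex* Example_lock = new Mutex(Mutex::leaf, "Example_lock",
//                                   true /* allow_vm_block */,
//                                   Mutex::_safepoint_check_never);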

Monitor::Monitor(int Rank, const char * name, bool allow_vm_block,
             SafepointCheckRequired safepoint_check_required) :
  Mutex(Rank, name, allow_vm_block, safepoint_check_required) {}

bool Mutex::owned_by_self() const {
  return _owner == Thread::current();
}

void Mutex::print_on_error(outputStream* st) const {
  st->print("[" PTR_FORMAT, p2i(this));
  st->print("] %s", _name);
  st->print(" - owner thread: " PTR_FORMAT, p2i(_owner));
}

// ----------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
const char* print_safepoint_check(Mutex::SafepointCheckRequired safepoint_check) {
  switch (safepoint_check) {
  case Mutex::_safepoint_check_never:     return "safepoint_check_never";
  case Mutex::_safepoint_check_sometimes: return "safepoint_check_sometimes";
  case Mutex::_safepoint_check_always:    return "safepoint_check_always";
  default: return "";
  }
}

void Mutex::print_on(outputStream* st) const {
  st->print("Mutex: [" PTR_FORMAT "] %s - owner: " PTR_FORMAT,
            p2i(this), _name, p2i(_owner));
  if (_allow_vm_block) {
    st->print("%s", " allow_vm_block");
  }
  st->print(" %s", print_safepoint_check(_safepoint_check_required));
  st->cr();
}
#endif

#ifdef ASSERT
void Mutex::assert_owner(Thread * expected) {
  const char* msg = "invalid owner";
  if (expected == NULL) {
    msg = "should be un-owned";
  }
  else if (expected == Thread::current()) {
    msg = "should be owned by current thread";
  }
  assert(_owner == expected,
         "%s: owner=" INTPTR_FORMAT ", should be=" INTPTR_FORMAT,
         msg, p2i(_owner), p2i(expected));
}

Mutex* Mutex::get_least_ranked_lock(Mutex* locks) {
  Mutex *res, *tmp;
  for (res = tmp = locks; tmp != NULL; tmp = tmp->next()) {
    if (tmp->rank() < res->rank()) {
      res = tmp;
    }
  }
  if (!SafepointSynchronize::is_at_safepoint()) {
    // In this case, we expect the held locks to be
    // in increasing rank order (modulo any native ranks)
    for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
      if (tmp->next() != NULL) {
        assert(tmp->rank() == Mutex::native ||
               tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
      }
    }
  }
  return res;
}

Mutex* Mutex::get_least_ranked_lock_besides_this(Mutex* locks) {
  Mutex *res, *tmp;
  for (res = NULL, tmp = locks; tmp != NULL; tmp = tmp->next()) {
    if (tmp != this && (res == NULL || tmp->rank() < res->rank())) {
      res = tmp;
    }
  }
  if (!SafepointSynchronize::is_at_safepoint()) {
    // In this case, we expect the held locks to be
    // in increasing rank order (modulo any native ranks)
    for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
      if (tmp->next() != NULL) {
        assert(tmp->rank() == Mutex::native ||
               tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
      }
    }
  }
  return res;
}

bool Mutex::contains(Mutex* locks, Mutex* lock) {
  for (; locks != NULL; locks = locks->next()) {
    if (locks == lock) {
      return true;
    }
  }
  return false;
}

// NSV implied with locking allow_vm_block or !safepoint_check locks.
void Mutex::no_safepoint_verifier(Thread* thread, bool enable) {
  // The tty_lock is special because it is released for the safepoint by
  // the safepoint mechanism.
  if (this == tty_lock) {
    return;
  }

  if (_allow_vm_block) {
    if (enable) {
      thread->_no_safepoint_count++;
    } else {
      thread->_no_safepoint_count--;
    }
  }
}

// Called immediately after lock acquisition or release as a diagnostic
// to track the lock-set of the thread and test for rank violations that
// might indicate exposure to deadlock.
// Rather like an EventListener for _owner (:>).

void Mutex::set_owner_implementation(Thread *new_owner) {
  // This function is solely responsible for maintaining
  // and checking the invariant that threads and locks
  // are in a 1/N relation, with some locks unowned.
  // It uses the Mutex::_owner, Mutex::_next, and
  // Thread::_owned_locks fields, and no other function
  // changes those fields.
  // It is illegal to set the mutex from one non-NULL
  // owner to another--it must be owned by NULL as an
  // intermediate state.

  if (new_owner != NULL) {
    // the thread is acquiring this lock

    assert(new_owner == Thread::current(), "Should I be doing this?");
    assert(_owner == NULL, "setting the owner thread of an already owned mutex");
    _owner = new_owner; // set the owner

    // link "this" into the owned locks list

    Mutex* locks = get_least_ranked_lock(new_owner->owned_locks());
    // Mutex::set_owner_implementation is a friend of Thread

    assert(this->rank() >= 0, "bad lock rank");

    // Deadlock avoidance rules require us to acquire Mutexes only in
    // a global total order. For example, if m1 is the lowest ranked mutex
    // that the thread holds and m2 is the mutex the thread is trying
    // to acquire, then deadlock avoidance rules require that the rank
    // of m2 be less than the rank of m1.
    // The rank Mutex::native is an exception in that it is not subject
    // to the verification rules.
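    // For instance (illustrative ranks only): if the least ranked lock already
    // held has rank 5, then acquiring another lock of rank 5 or higher trips the
    // check below, unless this lock has rank native or suspend_resume, or the
    // thread is at a safepoint.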
    if (this->rank() != Mutex::native &&
        this->rank() != Mutex::suspend_resume &&
        locks != NULL && locks->rank() <= this->rank() &&
        !SafepointSynchronize::is_at_safepoint()) {
      new_owner->print_owned_locks();
      fatal("acquiring lock %s/%d out of order with lock %s/%d -- "
            "possible deadlock", this->name(), this->rank(),
            locks->name(), locks->rank());
    }

    this->_next = new_owner->_owned_locks;
    new_owner->_owned_locks = this;

    // NSV implied with locking allow_vm_block flag.
    no_safepoint_verifier(new_owner, true);

  } else {
    // the thread is releasing this lock

    Thread* old_owner = _owner;
    _last_owner = old_owner;

    assert(old_owner != NULL, "removing the owner thread of an unowned mutex");
    assert(old_owner == Thread::current(), "removing the owner thread of an unowned mutex");

    _owner = NULL; // set the owner

    Mutex* locks = old_owner->owned_locks();

    // remove "this" from the owned locks list

    Mutex* prev = NULL;
    bool found = false;
    for (; locks != NULL; prev = locks, locks = locks->next()) {
      if (locks == this) {
        found = true;
        break;
      }
    }
    assert(found, "Removing a lock not owned");
    if (prev == NULL) {
      old_owner->_owned_locks = _next;
    } else {
      prev->_next = _next;
    }
    _next = NULL;

    // ~NSV implied with locking allow_vm_block flag.
    no_safepoint_verifier(old_owner, false);
  }
}
#endif // ASSERT