1 /* 2 * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "memory/allocation.inline.hpp" 27 #include "prims/jvmtiRawMonitor.hpp" 28 #include "runtime/atomic.hpp" 29 #include "runtime/interfaceSupport.inline.hpp" 30 #include "runtime/orderAccess.hpp" 31 #include "runtime/thread.inline.hpp" 32 33 JvmtiRawMonitor::QNode::QNode(Thread* thread) : _next(NULL), _prev(NULL), 34 _event(thread->_ParkEvent), 35 _notified(0), _t_state(TS_RUN) { 36 } 37 38 GrowableArray<JvmtiRawMonitor*>* JvmtiPendingMonitors::_monitors = 39 new (ResourceObj::C_HEAP, mtInternal) GrowableArray<JvmtiRawMonitor*>(1, true); 40 41 void JvmtiPendingMonitors::transition_raw_monitors() { 42 assert((Threads::number_of_threads()==1), 43 "Java thread has not been created yet or more than one java thread " 44 "is running. Raw monitor transition will not work"); 45 JavaThread* current_java_thread = JavaThread::current(); 46 assert(current_java_thread->thread_state() == _thread_in_vm, "Must be in vm"); 47 for (int i = 0; i < count(); i++) { 48 JvmtiRawMonitor* rmonitor = monitors()->at(i); 49 rmonitor->raw_enter(current_java_thread); 50 } 51 // pending monitors are converted to real monitor so delete them all. 52 dispose(); 53 } 54 55 // 56 // class JvmtiRawMonitor 57 // 58 59 JvmtiRawMonitor::JvmtiRawMonitor(const char* name) : _owner(NULL), 60 _recursions(0), 61 _entry_list(NULL), 62 _wait_set(NULL), 63 _waiters(0), 64 _magic(JVMTI_RM_MAGIC), 65 _name(NULL) { 66 #ifdef ASSERT 67 _name = strcpy(NEW_C_HEAP_ARRAY(char, strlen(name) + 1, mtInternal), name); 68 #endif 69 } 70 71 JvmtiRawMonitor::~JvmtiRawMonitor() { 72 #ifdef ASSERT 73 FreeHeap(_name); 74 #endif 75 _magic = 0; 76 } 77 78 79 bool 80 JvmtiRawMonitor::is_valid() { 81 int value = 0; 82 83 // This object might not be a JvmtiRawMonitor so we can't assume 84 // the _magic field is properly aligned. Get the value in a safe 85 // way and then check against JVMTI_RM_MAGIC. 86 87 switch (sizeof(_magic)) { 88 case 2: 89 value = Bytes::get_native_u2((address)&_magic); 90 break; 91 92 case 4: 93 value = Bytes::get_native_u4((address)&_magic); 94 break; 95 96 case 8: 97 value = Bytes::get_native_u8((address)&_magic); 98 break; 99 100 default: 101 guarantee(false, "_magic field is an unexpected size"); 102 } 103 104 return value == JVMTI_RM_MAGIC; 105 } 106 107 // ------------------------------------------------------------------------- 108 // The JVMTI raw monitor subsystem is entirely distinct from normal 109 // java-synchronization or jni-synchronization. JVMTI raw monitors are not 110 // associated with objects. They can be implemented in any manner 111 // that makes sense. The original implementors decided to piggy-back 112 // the raw-monitor implementation on the existing Java ObjectMonitor mechanism. 113 // Now we just use a simplified form of that ObjectMonitor code. 114 // 115 // Note that we use the single RawMonitor_lock to protect queue operations for 116 // _all_ raw monitors. This is a scalability impediment, but since raw monitor usage 117 // is fairly rare, this is not of concern. The RawMonitor_lock can not 118 // be held indefinitely. The critical sections must be short and bounded. 119 // 120 // ------------------------------------------------------------------------- 121 122 void JvmtiRawMonitor::simple_enter(Thread* self) { 123 for (;;) { 124 if (Atomic::replace_if_null(self, &_owner)) { 125 return; 126 } 127 128 QNode node(self); 129 self->_ParkEvent->reset(); // strictly optional 130 node._t_state = QNode::TS_ENTER; 131 132 RawMonitor_lock->lock_without_safepoint_check(); 133 node._next = _entry_list; 134 _entry_list = &node; 135 OrderAccess::fence(); 136 if (_owner == NULL && Atomic::replace_if_null(self, &_owner)) { 137 _entry_list = node._next; 138 RawMonitor_lock->unlock(); 139 return; 140 } 141 RawMonitor_lock->unlock(); 142 while (node._t_state == QNode::TS_ENTER) { 143 self->_ParkEvent->park(); 144 } 145 } 146 } 147 148 void JvmtiRawMonitor::simple_exit(Thread* self) { 149 guarantee(_owner == self, "invariant"); 150 OrderAccess::release_store(&_owner, (Thread*)NULL); 151 OrderAccess::fence(); 152 if (_entry_list == NULL) { 153 return; 154 } 155 156 RawMonitor_lock->lock_without_safepoint_check(); 157 QNode* w = _entry_list; 158 if (w != NULL) { 159 _entry_list = w->_next; 160 } 161 RawMonitor_lock->unlock(); 162 if (w != NULL) { 163 guarantee(w ->_t_state == QNode::TS_ENTER, "invariant"); 164 // Once we set _t_state to TS_RUN the waiting thread can complete 165 // simple_enter and 'w' is pointing into random stack space. So we have 166 // to ensure we extract the ParkEvent (which is in type-stable memory) 167 // before we set the state, and then don't access 'w'. 168 ParkEvent* ev = w->_event; 169 OrderAccess::loadstore(); 170 w->_t_state = QNode::TS_RUN; 171 OrderAccess::fence(); 172 ev->unpark(); 173 } 174 return; 175 } 176 177 int JvmtiRawMonitor::simple_wait(Thread* self, jlong millis) { 178 guarantee(_owner == self , "invariant"); 179 guarantee(_recursions == 0, "invariant"); 180 181 QNode node(self); 182 node._notified = 0; 183 node._t_state = QNode::TS_WAIT; 184 185 RawMonitor_lock->lock_without_safepoint_check(); 186 node._next = _wait_set; 187 _wait_set = &node; 188 RawMonitor_lock->unlock(); 189 190 simple_exit(self); 191 guarantee(_owner != self, "invariant"); 192 193 int ret = OS_OK; 194 if (millis <= 0) { 195 self->_ParkEvent->park(); 196 } else { 197 ret = self->_ParkEvent->park(millis); 198 } 199 200 // If thread still resides on the waitset then unlink it. 201 // Double-checked locking -- the usage is safe in this context 202 // as _t_state is volatile and the lock-unlock operators are 203 // serializing (barrier-equivalent). 204 205 if (node._t_state == QNode::TS_WAIT) { 206 RawMonitor_lock->lock_without_safepoint_check(); 207 if (node._t_state == QNode::TS_WAIT) { 208 // Simple O(n) unlink, but performance isn't critical here. 209 QNode* p; 210 QNode* q = NULL; 211 for (p = _wait_set; p != &node; p = p->_next) { 212 q = p; 213 } 214 guarantee(p == &node, "invariant"); 215 if (q == NULL) { 216 guarantee (p == _wait_set, "invariant"); 217 _wait_set = p->_next; 218 } else { 219 guarantee(p == q->_next, "invariant"); 220 q->_next = p->_next; 221 } 222 node._t_state = QNode::TS_RUN; 223 } 224 RawMonitor_lock->unlock(); 225 } 226 227 guarantee(node._t_state == QNode::TS_RUN, "invariant"); 228 simple_enter(self); 229 230 guarantee(_owner == self, "invariant"); 231 guarantee(_recursions == 0, "invariant"); 232 return ret; 233 } 234 235 void JvmtiRawMonitor::simple_notify(Thread* self, bool all) { 236 guarantee(_owner == self, "invariant"); 237 if (_wait_set == NULL) { 238 return; 239 } 240 241 // We have two options: 242 // A. Transfer the threads from the _wait_set to the _entry_list 243 // B. Remove the thread from the _wait_set and unpark() it. 244 // 245 // We use (B), which is crude and results in lots of futile 246 // context switching. In particular (B) induces lots of contention. 247 248 ParkEvent* ev = NULL; // consider using a small auto array ... 249 RawMonitor_lock->lock_without_safepoint_check(); 250 for (;;) { 251 QNode* w = _wait_set; 252 if (w == NULL) break; 253 _wait_set = w->_next; 254 if (ev != NULL) { 255 ev->unpark(); 256 ev = NULL; 257 } 258 ev = w->_event; 259 OrderAccess::loadstore(); 260 w->_t_state = QNode::TS_RUN; 261 OrderAccess::storeload(); 262 if (!all) { 263 break; 264 } 265 } 266 RawMonitor_lock->unlock(); 267 if (ev != NULL) { 268 ev->unpark(); 269 } 270 return; 271 } 272 273 // Any JavaThread will enter here with state _thread_blocked 274 void JvmtiRawMonitor::raw_enter(Thread* self) { 275 void* contended; 276 JavaThread* jt = NULL; 277 // don't enter raw monitor if thread is being externally suspended, it will 278 // surprise the suspender if a "suspended" thread can still enter monitor 279 if (self->is_Java_thread()) { 280 jt = (JavaThread*)self; 281 jt->SR_lock()->lock_without_safepoint_check(); 282 while (jt->is_external_suspend()) { 283 jt->SR_lock()->unlock(); 284 jt->java_suspend_self(); 285 jt->SR_lock()->lock_without_safepoint_check(); 286 } 287 // guarded by SR_lock to avoid racing with new external suspend requests. 288 contended = Atomic::cmpxchg(jt, &_owner, (Thread*)NULL); 289 jt->SR_lock()->unlock(); 290 } else { 291 contended = Atomic::cmpxchg(self, &_owner, (Thread*)NULL); 292 } 293 294 if (contended == self) { 295 _recursions++; 296 return; 297 } 298 299 if (contended == NULL) { 300 guarantee(_owner == self, "invariant"); 301 guarantee(_recursions == 0, "invariant"); 302 return; 303 } 304 305 self->set_current_pending_raw_monitor(this); 306 307 if (!self->is_Java_thread()) { 308 simple_enter(self); 309 } else { 310 guarantee(jt->thread_state() == _thread_blocked, "invariant"); 311 for (;;) { 312 jt->set_suspend_equivalent(); 313 // cleared by handle_special_suspend_equivalent_condition() or 314 // java_suspend_self() 315 simple_enter(jt); 316 317 // were we externally suspended while we were waiting? 318 if (!jt->handle_special_suspend_equivalent_condition()) { 319 break; 320 } 321 322 // This thread was externally suspended 323 // We have reentered the contended monitor, but while we were 324 // waiting another thread suspended us. We don't want to reenter 325 // the monitor while suspended because that would surprise the 326 // thread that suspended us. 327 // 328 // Drop the lock 329 simple_exit(jt); 330 331 jt->java_suspend_self(); 332 } 333 } 334 335 self->set_current_pending_raw_monitor(NULL); 336 337 guarantee(_owner == self, "invariant"); 338 guarantee(_recursions == 0, "invariant"); 339 } 340 341 int JvmtiRawMonitor::raw_exit(Thread* self) { 342 if (self != _owner) { 343 return M_ILLEGAL_MONITOR_STATE; 344 } 345 if (_recursions > 0) { 346 _recursions--; 347 } else { 348 simple_exit(self); 349 } 350 351 return M_OK; 352 } 353 354 // All JavaThreads will enter here with state _thread_blocked 355 356 int JvmtiRawMonitor::raw_wait(jlong millis, bool interruptible, Thread* self) { 357 if (self != _owner) { 358 return M_ILLEGAL_MONITOR_STATE; 359 } 360 361 // To avoid spurious wakeups we reset the parkevent. This is strictly optional. 362 // The caller must be able to tolerate spurious returns from raw_wait(). 363 self->_ParkEvent->reset(); 364 OrderAccess::fence(); 365 366 JavaThread* jt = NULL; 367 // check interrupt event 368 if (interruptible) { 369 assert(self->is_Java_thread(), "Only JavaThreads can be interruptible"); 370 jt = (JavaThread*)self; 371 if (jt->is_interrupted(true)) { 372 return M_INTERRUPTED; 373 } 374 } else { 375 assert(!self->is_Java_thread(), "JavaThreads must be interuptible"); 376 } 377 378 intptr_t save = _recursions; 379 _recursions = 0; 380 _waiters++; 381 if (self->is_Java_thread()) { 382 guarantee(jt->thread_state() == _thread_blocked, "invariant"); 383 jt->set_suspend_equivalent(); 384 } 385 int rv = simple_wait(self, millis); 386 _recursions = save; 387 _waiters--; 388 389 guarantee(self == _owner, "invariant"); 390 if (self->is_Java_thread()) { 391 for (;;) { 392 if (!jt->handle_special_suspend_equivalent_condition()) { 393 break; 394 } 395 simple_exit(jt); 396 jt->java_suspend_self(); 397 simple_enter(jt); 398 jt->set_suspend_equivalent(); 399 } 400 guarantee(jt == _owner, "invariant"); 401 } 402 403 if (interruptible && jt->is_interrupted(true)) { 404 return M_INTERRUPTED; 405 } 406 407 return M_OK; 408 } 409 410 int JvmtiRawMonitor::raw_notify(Thread* self) { 411 if (self != _owner) { 412 return M_ILLEGAL_MONITOR_STATE; 413 } 414 simple_notify(self, false); 415 return M_OK; 416 } 417 418 int JvmtiRawMonitor::raw_notifyAll(Thread* self) { 419 if (self != _owner) { 420 return M_ILLEGAL_MONITOR_STATE; 421 } 422 simple_notify(self, true); 423 return M_OK; 424 }