Print this page
Split |
Close |
Expand all |
Collapse all |
--- old/src/share/vm/runtime/thread.cpp
+++ new/src/share/vm/runtime/thread.cpp
1 1 /*
2 2 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "classfile/classLoader.hpp"
27 27 #include "classfile/javaClasses.hpp"
28 28 #include "classfile/systemDictionary.hpp"
29 29 #include "classfile/vmSymbols.hpp"
30 30 #include "code/scopeDesc.hpp"
31 31 #include "compiler/compileBroker.hpp"
32 32 #include "interpreter/interpreter.hpp"
33 33 #include "interpreter/linkResolver.hpp"
34 34 #include "interpreter/oopMapCache.hpp"
35 35 #include "jvmtifiles/jvmtiEnv.hpp"
36 36 #include "memory/gcLocker.inline.hpp"
37 37 #include "memory/oopFactory.hpp"
38 38 #include "memory/universe.inline.hpp"
39 39 #include "oops/instanceKlass.hpp"
40 40 #include "oops/objArrayOop.hpp"
41 41 #include "oops/oop.inline.hpp"
42 42 #include "oops/symbol.hpp"
43 43 #include "prims/jvm_misc.hpp"
44 44 #include "prims/jvmtiExport.hpp"
45 45 #include "prims/jvmtiThreadState.hpp"
46 46 #include "prims/privilegedStack.hpp"
47 47 #include "runtime/aprofiler.hpp"
48 48 #include "runtime/arguments.hpp"
49 49 #include "runtime/biasedLocking.hpp"
50 50 #include "runtime/deoptimization.hpp"
51 51 #include "runtime/fprofiler.hpp"
52 52 #include "runtime/frame.inline.hpp"
53 53 #include "runtime/init.hpp"
54 54 #include "runtime/interfaceSupport.hpp"
55 55 #include "runtime/java.hpp"
56 56 #include "runtime/javaCalls.hpp"
57 57 #include "runtime/jniPeriodicChecker.hpp"
58 58 #include "runtime/memprofiler.hpp"
59 59 #include "runtime/mutexLocker.hpp"
60 60 #include "runtime/objectMonitor.hpp"
61 61 #include "runtime/osThread.hpp"
62 62 #include "runtime/safepoint.hpp"
63 63 #include "runtime/sharedRuntime.hpp"
64 64 #include "runtime/statSampler.hpp"
65 65 #include "runtime/stubRoutines.hpp"
66 66 #include "runtime/task.hpp"
67 67 #include "runtime/threadCritical.hpp"
68 68 #include "runtime/threadLocalStorage.hpp"
69 69 #include "runtime/vframe.hpp"
70 70 #include "runtime/vframeArray.hpp"
71 71 #include "runtime/vframe_hp.hpp"
72 72 #include "runtime/vmThread.hpp"
73 73 #include "runtime/vm_operations.hpp"
74 74 #include "services/attachListener.hpp"
75 75 #include "services/management.hpp"
76 76 #include "services/threadService.hpp"
77 77 #include "trace/traceEventTypes.hpp"
78 78 #include "utilities/defaultStream.hpp"
79 79 #include "utilities/dtrace.hpp"
80 80 #include "utilities/events.hpp"
81 81 #include "utilities/preserveException.hpp"
82 82 #ifdef TARGET_OS_FAMILY_linux
83 83 # include "os_linux.inline.hpp"
84 84 # include "thread_linux.inline.hpp"
85 85 #endif
86 86 #ifdef TARGET_OS_FAMILY_solaris
87 87 # include "os_solaris.inline.hpp"
88 88 # include "thread_solaris.inline.hpp"
89 89 #endif
90 90 #ifdef TARGET_OS_FAMILY_windows
91 91 # include "os_windows.inline.hpp"
92 92 # include "thread_windows.inline.hpp"
93 93 #endif
94 94 #ifdef TARGET_OS_FAMILY_bsd
95 95 # include "os_bsd.inline.hpp"
96 96 # include "thread_bsd.inline.hpp"
97 97 #endif
98 98 #ifndef SERIALGC
99 99 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
100 100 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
101 101 #include "gc_implementation/parallelScavenge/pcTasks.hpp"
102 102 #endif
103 103 #ifdef COMPILER1
104 104 #include "c1/c1_Compiler.hpp"
105 105 #endif
106 106 #ifdef COMPILER2
107 107 #include "opto/c2compiler.hpp"
108 108 #include "opto/idealGraphPrinter.hpp"
109 109 #endif
110 110
111 111 #ifdef DTRACE_ENABLED
112 112
113 113 // Only bother with this argument setup if dtrace is available
114 114
115 115 #ifndef USDT2
116 116 HS_DTRACE_PROBE_DECL(hotspot, vm__init__begin);
117 117 HS_DTRACE_PROBE_DECL(hotspot, vm__init__end);
118 118 HS_DTRACE_PROBE_DECL5(hotspot, thread__start, char*, intptr_t,
119 119 intptr_t, intptr_t, bool);
120 120 HS_DTRACE_PROBE_DECL5(hotspot, thread__stop, char*, intptr_t,
121 121 intptr_t, intptr_t, bool);
122 122
123 123 #define DTRACE_THREAD_PROBE(probe, javathread) \
124 124 { \
125 125 ResourceMark rm(this); \
126 126 int len = 0; \
127 127 const char* name = (javathread)->get_thread_name(); \
128 128 len = strlen(name); \
129 129 HS_DTRACE_PROBE5(hotspot, thread__##probe, \
130 130 name, len, \
131 131 java_lang_Thread::thread_id((javathread)->threadObj()), \
132 132 (javathread)->osthread()->thread_id(), \
133 133 java_lang_Thread::is_daemon((javathread)->threadObj())); \
134 134 }
135 135
136 136 #else /* USDT2 */
137 137
138 138 #define HOTSPOT_THREAD_PROBE_start HOTSPOT_THREAD_PROBE_START
139 139 #define HOTSPOT_THREAD_PROBE_stop HOTSPOT_THREAD_PROBE_STOP
140 140
141 141 #define DTRACE_THREAD_PROBE(probe, javathread) \
142 142 { \
143 143 ResourceMark rm(this); \
144 144 int len = 0; \
145 145 const char* name = (javathread)->get_thread_name(); \
146 146 len = strlen(name); \
147 147 HOTSPOT_THREAD_PROBE_##probe( /* probe = start, stop */ \
148 148 (char *) name, len, \
149 149 java_lang_Thread::thread_id((javathread)->threadObj()), \
150 150 (uintptr_t) (javathread)->osthread()->thread_id(), \
151 151 java_lang_Thread::is_daemon((javathread)->threadObj())); \
152 152 }
153 153
154 154 #endif /* USDT2 */
155 155
156 156 #else // ndef DTRACE_ENABLED
157 157
158 158 #define DTRACE_THREAD_PROBE(probe, javathread)
159 159
160 160 #endif // ndef DTRACE_ENABLED
161 161
162 162 // Class hierarchy
163 163 // - Thread
164 164 // - VMThread
165 165 // - WatcherThread
166 166 // - ConcurrentMarkSweepThread
167 167 // - JavaThread
168 168 // - CompilerThread
169 169
170 170 // ======= Thread ========
171 171
172 172 // Support for forcing alignment of thread objects for biased locking
// Allocate a Thread. With biased locking, thread objects must sit on a
// markOopDesc::biased_lock_alignment boundary so the thread's address can be
// encoded in object mark words; we over-allocate and slide up to alignment.
void* Thread::operator new(size_t size) {
  if (UseBiasedLocking) {
    const int alignment = markOopDesc::biased_lock_alignment;
    // (alignment - sizeof(intptr_t)) is the largest padding we can ever
    // need, since malloc already returns at least intptr_t-aligned memory.
    size_t aligned_size = size + (alignment - sizeof(intptr_t));
    void* real_malloc_addr = CHeapObj::operator new(aligned_size);
    void* aligned_addr = (void*) align_size_up((intptr_t) real_malloc_addr, alignment);
    // The aligned object must still fit entirely inside the raw allocation.
    assert(((uintptr_t) aligned_addr + (uintptr_t) size) <=
           ((uintptr_t) real_malloc_addr + (uintptr_t) aligned_size),
           "JavaThread alignment code overflowed allocated storage");
    if (TraceBiasedLocking) {
      if (aligned_addr != real_malloc_addr)
        tty->print_cr("Aligned thread " INTPTR_FORMAT " to " INTPTR_FORMAT,
                      real_malloc_addr, aligned_addr);
    }
    // Stash the raw malloc address so operator delete can free the real block.
    ((Thread*) aligned_addr)->_real_malloc_address = real_malloc_addr;
    return aligned_addr;
  } else {
    return CHeapObj::operator new(size);
  }
}
193 193
194 194 void Thread::operator delete(void* p) {
195 195 if (UseBiasedLocking) {
196 196 void* real_malloc_addr = ((Thread*) p)->_real_malloc_address;
197 197 CHeapObj::operator delete(real_malloc_addr);
198 198 } else {
199 199 CHeapObj::operator delete(p);
200 200 }
201 201 }
202 202
203 203
204 204 // Base class for all threads: VMThread, WatcherThread, ConcurrentMarkSweepThread,
205 205 // JavaThread
206 206
207 207
// Base Thread constructor, shared by all thread subclasses (VMThread,
// WatcherThread, ConcurrentMarkSweepThread, JavaThread). Initializes the
// thread-local allocation areas, monitor free lists, suspend/resume state
// and park events; OS-level state is attached later via set_osthread().
Thread::Thread() {
  // stack and get_thread
  set_stack_base(NULL);
  set_stack_size(0);
  set_self_raw_id(0);
  set_lgrp_id(-1);

  // allocated data structures
  set_osthread(NULL);
  set_resource_area(new ResourceArea());
  set_handle_area(new HandleArea(NULL));
  set_active_handles(NULL);
  set_free_handle_block(NULL);
  set_last_handle_mark(NULL);

  // This initial value ==> never claimed.
  _oops_do_parity = 0;

  // the handle mark links itself to last_handle_mark
  new HandleMark(this);

  // plain initialization
  debug_only(_owned_locks = NULL;)
  debug_only(_allow_allocation_count = 0;)
  NOT_PRODUCT(_allow_safepoint_count = 0;)
  NOT_PRODUCT(_skip_gcalot = false;)
  CHECK_UNHANDLED_OOPS_ONLY(_gc_locked_out_count = 0;)
  _jvmti_env_iteration_count = 0;
  set_allocated_bytes(0);
  set_trace_buffer(NULL);
  _vm_operation_started_count = 0;
  _vm_operation_completed_count = 0;
  _current_pending_monitor = NULL;
  _current_pending_monitor_is_from_java = true;
  _current_waiting_monitor = NULL;
  _num_nested_signal = 0;
  // Per-thread ObjectMonitor free/in-use lists (see ObjectSynchronizer).
  omFreeList = NULL ;
  omFreeCount = 0 ;
  omFreeProvision = 32 ;
  omInUseList = NULL ;
  omInUseCount = 0 ;

#ifdef ASSERT
  _visited_for_critical_count = false;
#endif

  _SR_lock = new Monitor(Mutex::suspend_resume, "SR_lock", true);
  _suspend_flags = 0;

  // thread-specific hashCode stream generator state - Marsaglia shift-xor form
  _hashStateX = os::random() ;
  _hashStateY = 842502087 ;
  _hashStateZ = 0x8767 ; // (int)(3579807591LL & 0xffff) ;
  _hashStateW = 273326509 ;

  _OnTrap = 0 ;
  _schedctl = NULL ;
  _Stalled = 0 ;
  _TypeTag = 0x2BAD ;

  // Many of the following fields are effectively final - immutable
  // Note that nascent threads can't use the Native Monitor-Mutex
  // construct until the _MutexEvent is initialized ...
  // CONSIDER: instead of using a fixed set of purpose-dedicated ParkEvents
  // we might instead use a stack of ParkEvents that we could provision on-demand.
  // The stack would act as a cache to avoid calls to ParkEvent::Allocate()
  // and ::Release()
  _ParkEvent = ParkEvent::Allocate (this) ;
  _SleepEvent = ParkEvent::Allocate (this) ;
  _MutexEvent = ParkEvent::Allocate (this) ;
  _MuxEvent = ParkEvent::Allocate (this) ;

#ifdef CHECK_UNHANDLED_OOPS
  if (CheckUnhandledOops) {
    _unhandled_oops = new UnhandledOops(this);
  }
#endif // CHECK_UNHANDLED_OOPS
#ifdef ASSERT
  // Verify that operator new actually delivered the alignment that biased
  // locking requires (see Thread::operator new above).
  if (UseBiasedLocking) {
    assert((((uintptr_t) this) & (markOopDesc::biased_lock_alignment - 1)) == 0, "forced alignment of thread object failed");
    assert(this == _real_malloc_address ||
           this == (void*) align_size_up((intptr_t) _real_malloc_address, markOopDesc::biased_lock_alignment),
           "bug in forced alignment of thread objects");
  }
#endif /* ASSERT */
}
294 294
295 295 void Thread::initialize_thread_local_storage() {
296 296 // Note: Make sure this method only calls
297 297 // non-blocking operations. Otherwise, it might not work
298 298 // with the thread-startup/safepoint interaction.
299 299
300 300 // During Java thread startup, safepoint code should allow this
301 301 // method to complete because it may need to allocate memory to
302 302 // store information for the new thread.
303 303
304 304 // initialize structure dependent on thread local storage
305 305 ThreadLocalStorage::set_thread(this);
306 306
307 307 // set up any platform-specific state.
308 308 os::initialize_thread();
309 309
310 310 }
311 311
312 312 void Thread::record_stack_base_and_size() {
313 313 set_stack_base(os::current_stack_base());
314 314 set_stack_size(os::current_stack_size());
315 315 }
316 316
317 317
// Thread teardown. The order below matters: monitors are flushed first,
// the root HandleMark is deleted before the handle area it lives in, and
// thread-local storage is cleared/invalidated last.
Thread::~Thread() {
  // Reclaim the objectmonitors from the omFreeList of the moribund thread.
  ObjectSynchronizer::omFlush (this) ;

  // deallocate data structures
  delete resource_area();
  // since the handle marks are using the handle area, we have to deallocate the root
  // handle mark before deallocating the thread's handle area,
  assert(last_handle_mark() != NULL, "check we have an element");
  delete last_handle_mark();
  assert(last_handle_mark() == NULL, "check we have reached the end");

  // It's possible we can encounter a null _ParkEvent, etc., in stillborn threads.
  // We NULL out the fields for good hygiene.
  ParkEvent::Release (_ParkEvent) ; _ParkEvent = NULL ;
  ParkEvent::Release (_SleepEvent) ; _SleepEvent = NULL ;
  ParkEvent::Release (_MutexEvent) ; _MutexEvent = NULL ;
  ParkEvent::Release (_MuxEvent) ; _MuxEvent = NULL ;

  delete handle_area();

  // osthread() can be NULL, if creation of thread failed.
  if (osthread() != NULL) os::free_thread(osthread());

  delete _SR_lock;

  // clear thread local storage if the Thread is deleting itself
  if (this == Thread::current()) {
    ThreadLocalStorage::set_thread(NULL);
  } else {
    // In the case where we're not the current thread, invalidate all the
    // caches in case some code tries to get the current thread or the
    // thread that was destroyed, and gets stale information.
    ThreadLocalStorage::invalidate_all();
  }
  CHECK_UNHANDLED_OOPS_ONLY(if (CheckUnhandledOops) delete unhandled_oops();)
}
355 355
356 356 // NOTE: dummy function for assertion purpose.
357 357 void Thread::run() {
358 358 ShouldNotReachHere();
359 359 }
360 360
#ifdef ASSERT
// Private method to check for dangling thread pointer.
// A JavaThread pointer is only safe to dereference if it is the current
// thread or the caller holds the Threads_lock (which keeps the target
// from exiting and being freed).
void check_for_dangling_thread_pointer(Thread *thread) {
  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");
}
#endif
368 368
369 369
#ifndef PRODUCT
// Tracing method for basic thread operations; prints a single line per
// event when -XX:+TraceThreadEvents is on. For Java (non-compiler) threads
// it also resolves the thread name and priority, taking the Threads_lock
// if the caller does not already hold it.
void Thread::trace(const char* msg, const Thread* const thread) {
  if (!TraceThreadEvents) return;
  ResourceMark rm;
  ThreadCritical tc;
  const char *name = "non-Java thread";
  int prio = -1;
  if (thread->is_Java_thread()
      && !thread->is_Compiler_thread()) {
    // The Threads_lock must be held to get information about
    // this thread but may not be in some situations when
    // tracing thread events.
    bool release_Threads_lock = false;
    if (!Threads_lock->owned_by_self()) {
      Threads_lock->lock();
      release_Threads_lock = true;
    }
    JavaThread* jt = (JavaThread *)thread;
    name = (char *)jt->get_thread_name();
    oop thread_oop = jt->threadObj();
    if (thread_oop != NULL) {
      prio = java_lang_Thread::priority(thread_oop);
    }
    if (release_Threads_lock) {
      Threads_lock->unlock();
    }
  }
  // NOTE(review): thread->osthread() is dereferenced without a NULL check;
  // presumably callers only trace threads with a live osthread — confirm.
  tty->print_cr("Thread::%s " INTPTR_FORMAT " [%lx] %s (prio: %d)", msg, thread, thread->osthread()->thread_id(), name, prio);
}
#endif
401 401
402 402
// Return the OS-level priority of the given thread, mapped into the
// VM's ThreadPriority range.
ThreadPriority Thread::get_priority(const Thread* const thread) {
  trace("get priority", thread);
  ThreadPriority priority;
  // Can return an error! The status is ignored; the assert below
  // sanity-checks the out-parameter instead.
  (void)os::get_priority(thread, priority);
  assert(MinPriority <= priority && priority <= MaxPriority, "non-Java priority found");
  return priority;
}
411 411
// Set the OS-level priority of the given thread. Failures from the OS
// layer are deliberately ignored (best effort).
void Thread::set_priority(Thread* thread, ThreadPriority priority) {
  trace("set priority", thread);
  debug_only(check_for_dangling_thread_pointer(thread);)
  // Can return an error!
  (void)os::set_priority(thread, priority);
}
418 418
419 419
// Start a previously-created thread (unless -XX:+DisableStartThread).
void Thread::start(Thread* thread) {
  trace("start", thread);
  // Start is different from resume in that its safety is guaranteed by context or
  // being called from a Java method synchronized on the Thread object.
  if (!DisableStartThread) {
    if (thread->is_Java_thread()) {
      // Initialize the thread state to RUNNABLE before starting this thread.
      // Can not set it after the thread started because we do not know the
      // exact thread state at that time. It could be in MONITOR_WAIT or
      // in SLEEPING or some other state.
      java_lang_Thread::set_thread_status(((JavaThread*)thread)->threadObj(),
                                          java_lang_Thread::RUNNABLE);
    }
    os::start_thread(thread);
  }
}
436 436
437 437 // Enqueue a VM_Operation to do the job for us - sometime later
438 438 void Thread::send_async_exception(oop java_thread, oop java_throwable) {
439 439 VM_ThreadStop* vm_stop = new VM_ThreadStop(java_thread, java_throwable);
440 440 VMThread::execute(vm_stop);
441 441 }
442 442
443 443
444 444 //
445 445 // Check if an external suspend request has completed (or has been
446 446 // cancelled). Returns true if the thread is externally suspended and
447 447 // false otherwise.
448 448 //
449 449 // The bits parameter returns information about the code path through
450 450 // the routine. Useful for debugging:
451 451 //
452 452 // set in is_ext_suspend_completed():
453 453 // 0x00000001 - routine was entered
454 454 // 0x00000010 - routine return false at end
455 455 // 0x00000100 - thread exited (return false)
456 456 // 0x00000200 - suspend request cancelled (return false)
457 457 // 0x00000400 - thread suspended (return true)
458 458 // 0x00001000 - thread is in a suspend equivalent state (return true)
459 459 // 0x00002000 - thread is native and walkable (return true)
460 460 // 0x00004000 - thread is native_trans and walkable (needed retry)
461 461 //
462 462 // set in wait_for_ext_suspend_completion():
463 463 // 0x00010000 - routine was entered
464 464 // 0x00020000 - suspend request cancelled before loop (return false)
465 465 // 0x00040000 - thread suspended before loop (return true)
466 466 // 0x00080000 - suspend request cancelled in loop (return false)
467 467 // 0x00100000 - thread suspended in loop (return true)
468 468 // 0x00200000 - suspend not completed during retry loop (return false)
469 469 //
470 470
471 471 // Helper class for tracing suspend wait debug bits.
472 472 //
473 473 // 0x00000100 indicates that the target thread exited before it could
474 474 // self-suspend which is not a wait failure. 0x00000200, 0x00020000 and
475 475 // 0x00080000 each indicate a cancelled suspend request so they don't
476 476 // count as wait failures either.
#define DEBUG_FALSE_BITS (0x00000010 | 0x00200000)

// Stack-allocated tracer: when it goes out of scope at the end of a
// suspend-completion routine, its destructor inspects the accumulated
// debug bits and reports/asserts on genuine wait failures.
class TraceSuspendDebugBits : public StackObj {
 private:
  JavaThread * jt;
  bool is_wait;
  bool called_by_wait; // meaningful when !is_wait
  uint32_t * bits;

 public:
  TraceSuspendDebugBits(JavaThread *_jt, bool _is_wait, bool _called_by_wait,
                        uint32_t *_bits) {
    jt = _jt;
    is_wait = _is_wait;
    called_by_wait = _called_by_wait;
    bits = _bits;
  }

  ~TraceSuspendDebugBits() {
    if (!is_wait) {
#if 1
      // By default, don't trace bits for is_ext_suspend_completed() calls.
      // That trace is very chatty.
      return;
#else
      if (!called_by_wait) {
        // If tracing for is_ext_suspend_completed() is enabled, then only
        // trace calls to it from wait_for_ext_suspend_completion()
        return;
      }
#endif
    }

    if (AssertOnSuspendWaitFailure || TraceSuspendWaitFailures) {
      // Only DEBUG_FALSE_BITS count as failures; exit/cancel paths do not.
      if (bits != NULL && (*bits & DEBUG_FALSE_BITS) != 0) {
        MutexLocker ml(Threads_lock); // needed for get_thread_name()
        ResourceMark rm;

        tty->print_cr(
            "Failed wait_for_ext_suspend_completion(thread=%s, debug_bits=%x)",
            jt->get_thread_name(), *bits);

        guarantee(!AssertOnSuspendWaitFailure, "external suspend wait failed");
      }
    }
  }
};
#undef DEBUG_FALSE_BITS
525 525
526 526
// Check whether an external suspend request has completed (or been
// cancelled). Returns true if the thread is externally suspended, false
// otherwise. See the debug-bit legend in the comment block above for the
// meaning of the values OR'ed into *bits along each path.
bool JavaThread::is_ext_suspend_completed(bool called_by_wait, int delay, uint32_t *bits) {
  TraceSuspendDebugBits tsdb(this, false /* !is_wait */, called_by_wait, bits);

  bool did_trans_retry = false; // only do thread_in_native_trans retry once
  bool do_trans_retry; // flag to force the retry

  *bits |= 0x00000001;

  do {
    do_trans_retry = false;

    if (is_exiting()) {
      // Thread is in the process of exiting. This is always checked
      // first to reduce the risk of dereferencing a freed JavaThread.
      *bits |= 0x00000100;
      return false;
    }

    if (!is_external_suspend()) {
      // Suspend request is cancelled. This is always checked before
      // is_ext_suspended() to reduce the risk of a rogue resume
      // confusing the thread that made the suspend request.
      *bits |= 0x00000200;
      return false;
    }

    if (is_ext_suspended()) {
      // thread is suspended
      *bits |= 0x00000400;
      return true;
    }

    // Now that we no longer do hard suspends of threads running
    // native code, the target thread can be changing thread state
    // while we are in this routine:
    //
    //   _thread_in_native -> _thread_in_native_trans -> _thread_blocked
    //
    // We save a copy of the thread state as observed at this moment
    // and make our decision about suspend completeness based on the
    // copy. This closes the race where the thread state is seen as
    // _thread_in_native_trans in the if-thread_blocked check, but is
    // seen as _thread_blocked in if-thread_in_native_trans check.
    JavaThreadState save_state = thread_state();

    if (save_state == _thread_blocked && is_suspend_equivalent()) {
      // If the thread's state is _thread_blocked and this blocking
      // condition is known to be equivalent to a suspend, then we can
      // consider the thread to be externally suspended. This means that
      // the code that sets _thread_blocked has been modified to do
      // self-suspension if the blocking condition releases. We also
      // used to check for CONDVAR_WAIT here, but that is now covered by
      // the _thread_blocked with self-suspension check.
      //
      // Return true since we wouldn't be here unless there was still an
      // external suspend request.
      *bits |= 0x00001000;
      return true;
    } else if (save_state == _thread_in_native && frame_anchor()->walkable()) {
      // Threads running native code will self-suspend on native==>VM/Java
      // transitions. If its stack is walkable (should always be the case
      // unless this function is called before the actual java_suspend()
      // call), then the wait is done.
      *bits |= 0x00002000;
      return true;
    } else if (!called_by_wait && !did_trans_retry &&
               save_state == _thread_in_native_trans &&
               frame_anchor()->walkable()) {
      // The thread is transitioning from thread_in_native to another
      // thread state. check_safepoint_and_suspend_for_native_trans()
      // will force the thread to self-suspend. If it hasn't gotten
      // there yet we may have caught the thread in-between the native
      // code check above and the self-suspend. Lucky us. If we were
      // called by wait_for_ext_suspend_completion(), then it
      // will be doing the retries so we don't have to.
      //
      // Since we use the saved thread state in the if-statement above,
      // there is a chance that the thread has already transitioned to
      // _thread_blocked by the time we get here. In that case, we will
      // make a single unnecessary pass through the logic below. This
      // doesn't hurt anything since we still do the trans retry.

      *bits |= 0x00004000;

      // Once the thread leaves thread_in_native_trans for another
      // thread state, we break out of this retry loop. We shouldn't
      // need this flag to prevent us from getting back here, but
      // sometimes paranoia is good.
      did_trans_retry = true;

      // We wait for the thread to transition to a more usable state.
      for (int i = 1; i <= SuspendRetryCount; i++) {
        // We used to do an "os::yield_all(i)" call here with the intention
        // that yielding would increase on each retry. However, the parameter
        // is ignored on Linux which means the yield didn't scale up. Waiting
        // on the SR_lock below provides a much more predictable scale up for
        // the delay. It also provides a simple/direct point to check for any
        // safepoint requests from the VMThread

        // temporarily drops SR_lock while doing wait with safepoint check
        // (if we're a JavaThread - the WatcherThread can also call this)
        // and increase delay with each retry
        SR_lock()->wait(!Thread::current()->is_Java_thread(), i * delay);

        // check the actual thread state instead of what we saved above
        if (thread_state() != _thread_in_native_trans) {
          // the thread has transitioned to another thread state so
          // try all the checks (except this one) one more time.
          do_trans_retry = true;
          break;
        }
      } // end retry loop


    }
  } while (do_trans_retry);

  // None of the suspended conditions held: report not-suspended.
  *bits |= 0x00000010;
  return false;
}
647 647
//
// Wait for an external suspend request to complete (or be cancelled).
// Returns true if the thread is externally suspended and false otherwise.
// Retries up to 'retries' times, waiting (i * delay) ms on SR_lock each
// pass; SR_lock is released between checks so the suspend can complete.
//
bool JavaThread::wait_for_ext_suspend_completion(int retries, int delay,
                                                 uint32_t *bits) {
  TraceSuspendDebugBits tsdb(this, true /* is_wait */,
                             false /* !called_by_wait */, bits);

  // local flag copies to minimize SR_lock hold time
  bool is_suspended;
  bool pending;
  uint32_t reset_bits;

  // set a marker so is_ext_suspend_completed() knows we are the caller
  *bits |= 0x00010000;

  // We use reset_bits to reinitialize the bits value at the top of
  // each retry loop. This allows the caller to make use of any
  // unused bits for their own marking purposes.
  reset_bits = *bits;

  {
    MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
    is_suspended = is_ext_suspend_completed(true /* called_by_wait */,
                                            delay, bits);
    pending = is_external_suspend();
  }
  // must release SR_lock to allow suspension to complete

  if (!pending) {
    // A cancelled suspend request is the only false return from
    // is_ext_suspend_completed() that keeps us from entering the
    // retry loop.
    *bits |= 0x00020000;
    return false;
  }

  if (is_suspended) {
    *bits |= 0x00040000;
    return true;
  }

  for (int i = 1; i <= retries; i++) {
    *bits = reset_bits; // reinit to only track last retry

    // We used to do an "os::yield_all(i)" call here with the intention
    // that yielding would increase on each retry. However, the parameter
    // is ignored on Linux which means the yield didn't scale up. Waiting
    // on the SR_lock below provides a much more predictable scale up for
    // the delay. It also provides a simple/direct point to check for any
    // safepoint requests from the VMThread

    {
      MutexLocker ml(SR_lock());
      // wait with safepoint check (if we're a JavaThread - the WatcherThread
      // can also call this) and increase delay with each retry
      SR_lock()->wait(!Thread::current()->is_Java_thread(), i * delay);

      is_suspended = is_ext_suspend_completed(true /* called_by_wait */,
                                              delay, bits);

      // It is possible for the external suspend request to be cancelled
      // (by a resume) before the actual suspend operation is completed.
      // Refresh our local copy to see if we still need to wait.
      pending = is_external_suspend();
    }

    if (!pending) {
      // A cancelled suspend request is the only false return from
      // is_ext_suspend_completed() that keeps us from staying in the
      // retry loop.
      *bits |= 0x00080000;
      return false;
    }

    if (is_suspended) {
      *bits |= 0x00100000;
      return true;
    }
  } // end retry loop

  // thread did not suspend after all our retries
  *bits |= 0x00200000;
  return false;
}
734 734
735 735 #ifndef PRODUCT
736 736 void JavaThread::record_jump(address target, address instr, const char* file, int line) {
737 737
738 738 // This should not need to be atomic as the only way for simultaneous
739 739 // updates is via interrupts. Even then this should be rare or non-existant
740 740 // and we don't care that much anyway.
741 741
742 742 int index = _jmp_ring_index;
743 743 _jmp_ring_index = (index + 1 ) & (jump_ring_buffer_size - 1);
744 744 _jmp_ring[index]._target = (intptr_t) target;
745 745 _jmp_ring[index]._instruction = (intptr_t) instr;
746 746 _jmp_ring[index]._file = file;
747 747 _jmp_ring[index]._line = line;
748 748 }
749 749 #endif /* PRODUCT */
750 750
751 751 // Called by flat profiler
752 752 // Callers have already called wait_for_ext_suspend_completion
753 753 // The assertion for that is currently too complex to put here:
754 754 bool JavaThread::profile_last_Java_frame(frame* _fr) {
755 755 bool gotframe = false;
756 756 // self suspension saves needed state.
757 757 if (has_last_Java_frame() && _anchor.walkable()) {
758 758 *_fr = pd_last_frame();
759 759 gotframe = true;
760 760 }
761 761 return gotframe;
762 762 }
763 763
// Interrupt the given thread via the OS layer (sets the osthread's
// interrupted flag and unparks/wakes it as the platform requires).
void Thread::interrupt(Thread* thread) {
  trace("interrupt", thread);
  debug_only(check_for_dangling_thread_pointer(thread);)
  os::interrupt(thread);
}
769 769
// Query (and optionally clear) the thread's interrupted state.
bool Thread::is_interrupted(Thread* thread, bool clear_interrupted) {
  trace("is_interrupted", thread);
  debug_only(check_for_dangling_thread_pointer(thread);)
  // Note: If clear_interrupted==false, this simply fetches and
  // returns the value of the field osthread()->interrupted().
  return os::is_interrupted(thread, clear_interrupted);
}
777 777
778 778
// GC Support
// Try to claim this thread for root scanning in a parallel GC phase by
// CAS'ing _oops_do_parity from the old parity to strong_roots_parity.
// Returns true iff the caller won the claim; a false return means another
// worker already claimed (or is claiming) this thread.
bool Thread::claim_oops_do_par_case(int strong_roots_parity) {
  jint thread_parity = _oops_do_parity;
  if (thread_parity != strong_roots_parity) {
    jint res = Atomic::cmpxchg(strong_roots_parity, &_oops_do_parity, thread_parity);
    if (res == thread_parity) {
      // CAS succeeded: this worker owns the thread for this phase.
      return true;
    } else {
      // Lost the race; the winner must have installed the new parity.
      guarantee(res == strong_roots_parity, "Or else what?");
      assert(SharedHeap::heap()->workers()->active_workers() > 0,
             "Should only fail when parallel.");
      return false;
    }
  }
  // Parity already current: someone claimed this thread earlier in the phase.
  assert(SharedHeap::heap()->workers()->active_workers() > 0,
         "Should only fail when parallel.");
  return false;
}
797 797
// Apply the oop closure to all oops reachable from this generic thread:
// its JNI handle block, the pending exception (from ThreadShadow), and
// the handle area. Subclasses extend this with their own roots.
void Thread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
  active_handles()->oops_do(f);
  // Do oop for ThreadShadow
  f->do_oop((oop*)&_pending_exception);
  handle_area()->oops_do(f);
}
804 804
// Base-class no-op: a generic (non-Java) thread references no nmethods,
// so the closure 'cf' is intentionally unused here.
void Thread::nmethods_do(CodeBlobClosure* cf) {
  // no nmethods in a generic thread...
}
808 808
// Print a one-line summary (priority, tid, OS-thread info) of this thread.
// In debug builds with WizardMode, also dumps owned VM-internal locks.
void Thread::print_on(outputStream* st) const {
  // get_priority assumes osthread initialized
  if (osthread() != NULL) {
    st->print("prio=%d tid=" INTPTR_FORMAT " ", get_priority(this), this);
    osthread()->print_on(st);
  }
  debug_only(if (WizardMode) print_owned_locks_on(st);)
}
817 817
// Thread::print_on_error() is called by fatal error handler. Don't use
// any lock or allocate memory.
// NOTE(review): the else-if chain tests the most specific kinds first;
// presumably some kinds overlap (e.g. a compiler thread may also answer
// true to is_Java_thread()) -- confirm before reordering.
void Thread::print_on_error(outputStream* st, char* buf, int buflen) const {
  if (is_VM_thread()) st->print("VMThread");
  else if (is_Compiler_thread()) st->print("CompilerThread");
  else if (is_Java_thread()) st->print("JavaThread");
  else if (is_GC_task_thread()) st->print("GCTaskThread");
  else if (is_Watcher_thread()) st->print("WatcherThread");
  else if (is_ConcurrentGC_thread()) st->print("ConcurrentGCThread");
  else st->print("Thread");

  // Stack grows downward: [base - size, base] is the usable range.
  st->print(" [stack: " PTR_FORMAT "," PTR_FORMAT "]",
            _stack_base - _stack_size, _stack_base);

  if (osthread()) {
    st->print(" [id=%d]", osthread()->thread_id());
  }
}
836 836
837 837 #ifdef ASSERT
838 838 void Thread::print_owned_locks_on(outputStream* st) const {
839 839 Monitor *cur = _owned_locks;
840 840 if (cur == NULL) {
841 841 st->print(" (no locks) ");
842 842 } else {
843 843 st->print_cr(" Locks owned:");
844 844 while(cur) {
845 845 cur->print_on(st);
846 846 cur = cur->next();
847 847 }
848 848 }
849 849 }
850 850
851 851 static int ref_use_count = 0;
852 852
853 853 bool Thread::owns_locks_but_compiled_lock() const {
854 854 for(Monitor *cur = _owned_locks; cur; cur = cur->next()) {
855 855 if (cur != Compile_lock) return true;
856 856 }
857 857 return false;
858 858 }
859 859
860 860
861 861 #endif
862 862
863 863 #ifndef PRODUCT
864 864
// The flag potential_vm_operation notifies if this particular safepoint state
// could potentially invoke the VM thread (i.e., an oop allocation). In that case,
// we also have to make sure that no owned Monitors with allow_vm_block set are held.
void Thread::check_for_valid_safepoint_state(bool potential_vm_operation) {
  // Check if current thread is allowed to block at a safepoint
  if (!(_allow_safepoint_count == 0))
    fatal("Possible safepoint reached by thread that does not allow it");
  if (is_Java_thread() && ((JavaThread*)this)->thread_state() != _thread_in_vm) {
    fatal("LEAF method calling lock?");
  }

#ifdef ASSERT
  if (potential_vm_operation && is_Java_thread()
      && !Universe::is_bootstrapping()) {
    // Make sure we do not hold any locks that the VM thread also uses.
    // This could potentially lead to deadlocks
    for(Monitor *cur = _owned_locks; cur; cur = cur->next()) {
      // Threads_lock is special, since the safepoint synchronization will not start before this is
      // acquired. Hence, a JavaThread cannot be holding it at a safepoint. So is VMOperationRequest_lock,
      // since it is used to transfer control between JavaThreads and the VMThread
      // Do not *exclude* any locks unless you are absolutely sure it is correct. Ask someone else first!
      if ( (cur->allow_vm_block() &&
            cur != Threads_lock &&
            cur != Compile_lock &&               // Temporary: should not be necessary when we get separate compilation
            cur != VMOperationRequest_lock &&
            cur != VMOperationQueue_lock) ||
            cur->rank() == Mutex::special) {
        warning("Thread holding lock at safepoint that vm can block on: %s", cur->name());
      }
    }
  }

  if (GCALotAtAllSafepoints) {
    // We could enter a safepoint here and thus have a gc
    InterfaceSupport::check_gc_alot();
  }
#endif
}
903 903 #endif
904 904
905 905 bool Thread::is_in_stack(address adr) const {
906 906 assert(Thread::current() == this, "is_in_stack can only be called from current thread");
907 907 address end = os::current_stack_pointer();
908 908 if (stack_base() >= adr && adr >= end) return true;
909 909
910 910 return false;
911 911 }
912 912
913 913
// We had to move these methods here, because vm threads get into ObjectSynchronizer::enter
// However, there is a note in JavaThread::is_lock_owned() about the VM threads not being
// used for compilation in the future. If that change is made, the need for these methods
// should be revisited, and they should be removed if possible.

// A lock is "owned" by this generic thread if its address lies on the
// thread's local stack (stack-allocated BasicLocks).
bool Thread::is_lock_owned(address adr) const {
  return on_local_stack(adr);
}
922 922
// Bind this thread object to the already-running primordial OS thread.
// Returns false if the platform layer fails to do so.
bool Thread::set_as_starting_thread() {
 // NOTE: this must be called inside the main thread.
  return os::create_main_thread((JavaThread*)this);
}
927 927
// Resolve and run static initialization for the named class; any resolution
// or <clinit> exception propagates to the caller via the CHECK macros.
static void initialize_class(Symbol* class_name, TRAPS) {
  klassOop klass = SystemDictionary::resolve_or_fail(class_name, true, CHECK);
  instanceKlass::cast(klass)->initialize(CHECK);
}
932 932
933 933
// Creates the initial ThreadGroup hierarchy: the root "system" group
// (registered with the Universe) and, inside it, the "main" group, which is
// returned as a Handle. Any Java exception aborts via CHECK_NH.
static Handle create_initial_thread_group(TRAPS) {
  klassOop k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_ThreadGroup(), true, CHECK_NH);
  instanceKlassHandle klass (THREAD, k);

  // system group: constructed with the no-arg ThreadGroup() constructor.
  Handle system_instance = klass->allocate_instance_handle(CHECK_NH);
  {
    JavaValue result(T_VOID);
    JavaCalls::call_special(&result,
                            system_instance,
                            klass,
                            vmSymbols::object_initializer_name(),
                            vmSymbols::void_method_signature(),
                            CHECK_NH);
  }
  Universe::set_system_thread_group(system_instance());

  // main group: ThreadGroup(system, "main").
  Handle main_instance = klass->allocate_instance_handle(CHECK_NH);
  {
    JavaValue result(T_VOID);
    Handle string = java_lang_String::create_from_str("main", CHECK_NH);
    JavaCalls::call_special(&result,
                            main_instance,
                            klass,
                            vmSymbols::object_initializer_name(),
                            vmSymbols::threadgroup_string_void_signature(),
                            system_instance,
                            string,
                            CHECK_NH);
  }
  return main_instance;
}
966 966
// Creates the initial java.lang.Thread object ("main") for the primordial
// thread, links it to the native JavaThread both ways, and runs the
// Thread(ThreadGroup, String) constructor. Returns the new oop, or NULL
// (via CHECK_NULL) if an exception was raised.
static oop create_initial_thread(Handle thread_group, JavaThread* thread, TRAPS) {
  klassOop k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_Thread(), true, CHECK_NULL);
  instanceKlassHandle klass (THREAD, k);
  instanceHandle thread_oop = klass->allocate_instance_handle(CHECK_NULL);

  // Wire native thread <-> Java object before running the constructor.
  java_lang_Thread::set_thread(thread_oop(), thread);
  java_lang_Thread::set_priority(thread_oop(), NormPriority);
  thread->set_threadObj(thread_oop());

  Handle string = java_lang_String::create_from_str("main", CHECK_NULL);

  JavaValue result(T_VOID);
  JavaCalls::call_special(&result, thread_oop,
                          klass,
                          vmSymbols::object_initializer_name(),
                          vmSymbols::threadgroup_string_void_signature(),
                          thread_group,
                          string,
                          CHECK_NULL);
  return thread_oop();
}
↓ open down ↓ |
988 lines elided |
↑ open up ↑ |
989 989
// Invoke java.lang.System.initializeSystemClass() to complete library-side
// initialization (System.in/out/err, properties, etc. are set up there).
static void call_initializeSystemClass(TRAPS) {
  klassOop k =  SystemDictionary::resolve_or_fail(vmSymbols::java_lang_System(), true, CHECK);
  instanceKlassHandle klass (THREAD, k);

  JavaValue result(T_VOID);
  JavaCalls::call_static(&result, klass, vmSymbols::initializeSystemClass_name(),
                                         vmSymbols::void_method_signature(), CHECK);
}
998 998
// Static buffer holding the JRE name extracted below; remains "" until
// get_java_runtime_name() succeeds.
char java_runtime_name[128] = "";

// extract the JRE name from sun.misc.Version.java_runtime_name
// Returns a pointer into the static java_runtime_name buffer, or NULL when
// the class/field/value is unavailable. Uses SystemDictionary::find (no
// loading/initialization is triggered); a pending exception is cleared and
// NULL returned via CHECK_AND_CLEAR_NULL.
static const char* get_java_runtime_name(TRAPS) {
  klassOop k = SystemDictionary::find(vmSymbols::sun_misc_Version(),
                                      Handle(), Handle(), CHECK_AND_CLEAR_NULL);
  fieldDescriptor fd;
  bool found = k != NULL &&
               instanceKlass::cast(k)->find_local_field(vmSymbols::java_runtime_name_name(),
                                                        vmSymbols::string_signature(), &fd);
  if (found) {
    // Static String field: read it out of the class mirror at the field's offset.
    oop name_oop = k->java_mirror()->obj_field(fd.offset());
    if (name_oop == NULL)
      return NULL;
    // Copy into the static buffer (truncating to its capacity).
    const char* name = java_lang_String::as_utf8_string(name_oop,
                                                        java_runtime_name,
                                                        sizeof(java_runtime_name));
    return name;
  } else {
    return NULL;
  }
}
1021 +
// General purpose hook into Java code, run once when the VM is initialized.
// The Java library method itself may be changed independently from the VM.
// Silently does nothing if the PostVMInitHook class is absent.
static void call_postVMInitHook(TRAPS) {
  klassOop k = SystemDictionary::PostVMInitHook_klass();
  instanceKlassHandle klass (THREAD, k);
  if (klass.not_null()) {
    JavaValue result(T_VOID);
    JavaCalls::call_static(&result, klass, vmSymbols::run_method_name(),
                                           vmSymbols::void_method_signature(),
                                           CHECK);
  }
}
1011 1034
// Push the current VM_Version info string into the "java.vm.info" system
// property by calling java.lang.System.setProperty from Java.
static void reset_vm_info_property(TRAPS) {
  // the vm info string
  ResourceMark rm(THREAD);
  const char *vm_info = VM_Version::vm_info_string();

  // java.lang.System class
  klassOop k =  SystemDictionary::resolve_or_fail(vmSymbols::java_lang_System(), true, CHECK);
  instanceKlassHandle klass (THREAD, k);

  // setProperty arguments
  Handle key_str    = java_lang_String::create_from_str("java.vm.info", CHECK);
  Handle value_str  = java_lang_String::create_from_str(vm_info, CHECK);

  // return value
  JavaValue r(T_OBJECT);

  // public static String setProperty(String key, String value);
  JavaCalls::call_static(&r,
                         klass,
                         vmSymbols::setProperty_name(),
                         vmSymbols::string_string_string_signature(),
                         key_str,
                         value_str,
                         CHECK);
}
1037 1060
1038 1061
// Allocate and initialize the java.lang.Thread object for this JavaThread:
// run the appropriate Thread constructor (named or unnamed variant), set the
// daemon flag, and register the thread with its ThreadGroup via group.add().
// Note: the constructor calls pass THREAD (not CHECK) deliberately, so that
// the daemon flag is still set before the pending-exception check below.
void JavaThread::allocate_threadObj(Handle thread_group, char* thread_name, bool daemon, TRAPS) {
  assert(thread_group.not_null(), "thread group should be specified");
  assert(threadObj() == NULL, "should only create Java thread object once");

  klassOop k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_Thread(), true, CHECK);
  instanceKlassHandle klass (THREAD, k);
  instanceHandle thread_oop = klass->allocate_instance_handle(CHECK);

  // Link native and Java representations both ways before the constructor runs.
  java_lang_Thread::set_thread(thread_oop(), this);
  java_lang_Thread::set_priority(thread_oop(), NormPriority);
  set_threadObj(thread_oop());

  JavaValue result(T_VOID);
  if (thread_name != NULL) {
    Handle name = java_lang_String::create_from_str(thread_name, CHECK);
    // Thread gets assigned specified name and null target
    JavaCalls::call_special(&result,
                            thread_oop,
                            klass,
                            vmSymbols::object_initializer_name(),
                            vmSymbols::threadgroup_string_void_signature(),
                            thread_group, // Argument 1
                            name,         // Argument 2
                            THREAD);
  } else {
    // Thread gets assigned name "Thread-nnn" and null target
    // (java.lang.Thread doesn't have a constructor taking only a ThreadGroup argument)
    JavaCalls::call_special(&result,
                            thread_oop,
                            klass,
                            vmSymbols::object_initializer_name(),
                            vmSymbols::threadgroup_runnable_void_signature(),
                            thread_group, // Argument 1
                            Handle(),     // Argument 2
                            THREAD);
  }


  if (daemon) {
      java_lang_Thread::set_daemon(thread_oop());
  }

  // A constructor exception leaves the group.add() call below undone.
  if (HAS_PENDING_EXCEPTION) {
    return;
  }

  KlassHandle group(this, SystemDictionary::ThreadGroup_klass());
  Handle threadObj(this, this->threadObj());

  // Register the new thread with its group: group.add(thread).
  JavaCalls::call_special(&result,
                          thread_group,
                          group,
                          vmSymbols::add_method_name(),
                          vmSymbols::thread_void_signature(),
                          threadObj,          // Arg 1
                          THREAD);


}
1098 1121
// NamedThread -- non-JavaThread subclasses with multiple
// uniquely named instances should derive from this.
NamedThread::NamedThread() : Thread() {
  _name = NULL;              // set later via set_name()
  _processed_thread = NULL;  // no thread currently being processed on our behalf
}
1105 1128
// Release the C-heap-allocated name, if set_name() was ever called.
NamedThread::~NamedThread() {
  if (_name != NULL) {
    FREE_C_HEAP_ARRAY(char, _name);
    _name = NULL;
  }
}
1112 1135
// One-shot, printf-style name setter. The name is allocated on the C heap
// (max_name_len bytes) and freed by the destructor; calling twice is a bug
// (guaranteed).
void NamedThread::set_name(const char* format, ...) {
  guarantee(_name == NULL, "Only get to set name once.");
  _name = NEW_C_HEAP_ARRAY(char, max_name_len);
  guarantee(_name != NULL, "alloc failure");
  va_list ap;
  va_start(ap, format);
  jio_vsnprintf(_name, max_name_len, format, ap);
  va_end(ap);
}
1122 1145
1123 1146 // ======= WatcherThread ========
1124 1147
1125 1148 // The watcher thread exists to simulate timer interrupts. It should
1126 1149 // be replaced by an abstraction over whatever native support for
1127 1150 // timer interrupts exists on the platform.
1128 1151
WatcherThread* WatcherThread::_watcher_thread   = NULL;  // the singleton instance (NULL when not running)
volatile bool  WatcherThread::_should_terminate = false; // set by stop(), polled by run()
1131 1154
// Construct and (unless DisableStartThread) immediately start the singleton
// watcher thread at the highest OS priority. If os::create_thread fails,
// _watcher_thread stays NULL.
WatcherThread::WatcherThread() : Thread() {
  assert(watcher_thread() == NULL, "we can only allocate one WatcherThread");
  if (os::create_thread(this, os::watcher_thread)) {
    _watcher_thread = this;

    // Set the watcher thread to the highest OS priority which should not be
    // used, unless a Java thread with priority java.lang.Thread.MAX_PRIORITY
    // is created. The only normal thread using this priority is the reference
    // handler thread, which runs for very short intervals only.
    // If the VMThread's priority is not lower than the WatcherThread profiling
    // will be inaccurate.
    os::set_priority(this, MaxPriority);
    if (!DisableStartThread) {
      os::start_thread(this);
    }
  }
}
1149 1172
// Main loop of the watcher thread: repeatedly sleep until the next
// PeriodicTask is due, tick the tasks, and additionally act as a watchdog
// that force-kills the VM if the fatal error handler hangs. Exits when
// _should_terminate is set (or when no periodic tasks remain), then
// signals the terminator and clears the singleton.
void WatcherThread::run() {
  assert(this == watcher_thread(), "just checking");

  this->record_stack_base_and_size();
  this->initialize_thread_local_storage();
  this->set_active_handles(JNIHandleBlock::allocate_block());
  while(!_should_terminate) {
    assert(watcher_thread() == Thread::current(),  "thread consistency check");
    assert(watcher_thread() == this,  "thread consistency check");

    // Calculate how long it'll be until the next PeriodicTask work
    // should be done, and sleep that amount of time.
    size_t time_to_wait = PeriodicTask::time_to_wait();

    // we expect this to timeout - we only ever get unparked when
    // we should terminate
    {
      OSThreadWaitState osts(this->osthread(), false /* not Object.wait() */);

      jlong prev_time = os::javaTimeNanos();
      for (;;) {
        int res= _SleepEvent->park(time_to_wait);
        if (res == OS_TIMEOUT || _should_terminate)
          break;
        // spurious wakeup of some kind; re-compute the remaining wait
        // (javaTimeNanos() is ns, park() takes ms -- hence the /1000000).
        jlong now = os::javaTimeNanos();
        time_to_wait -= (now - prev_time) / 1000000;
        if (time_to_wait <= 0)
          break;
        prev_time = now;
      }
    }

    if (is_error_reported()) {
      // A fatal error has happened, the error handler(VMError::report_and_die)
      // should abort JVM after creating an error log file. However in some
      // rare cases, the error handler itself might deadlock. Here we try to
      // kill JVM if the fatal error handler fails to abort in 2 minutes.
      //
      // This code is in WatcherThread because WatcherThread wakes up
      // periodically so the fatal error handler doesn't need to do anything;
      // also because the WatcherThread is less likely to crash than other
      // threads.

      for (;;) {
        if (!ShowMessageBoxOnError
         && (OnError == NULL || OnError[0] == '\0')
         && Arguments::abort_hook() == NULL) {
             os::sleep(this, 2 * 60 * 1000, false); // wait 2 minutes, then die hard
             fdStream err(defaultStream::output_fd());
             err.print_raw_cr("# [ timer expired, abort... ]");
             // skip atexit/vm_exit/vm_abort hooks
             os::die();
        }

        // Wake up 5 seconds later, the fatal handler may reset OnError or
        // ShowMessageBoxOnError when it is ready to abort.
        os::sleep(this, 5 * 1000, false);
      }
    }

    PeriodicTask::real_time_tick(time_to_wait);

    // If we have no more tasks left due to dynamic disenrollment,
    // shut down the thread since we don't currently support dynamic enrollment
    if (PeriodicTask::num_tasks() == 0) {
      _should_terminate = true;
    }
  }

  // Signal that it is terminated
  {
    MutexLockerEx mu(Terminator_lock, Mutex::_no_safepoint_check_flag);
    _watcher_thread = NULL;
    Terminator_lock->notify();
  }

  // Thread destructor usually does this..
  ThreadLocalStorage::set_thread(NULL);
}
1230 1253
1231 1254 void WatcherThread::start() {
1232 1255 if (watcher_thread() == NULL) {
1233 1256 _should_terminate = false;
1234 1257 // Create the single instance of WatcherThread
1235 1258 new WatcherThread();
1236 1259 }
1237 1260 }
1238 1261
// Request termination of the watcher thread and block until it has cleared
// the singleton (see the end of WatcherThread::run()). The wait is made
// suspend-equivalent to avoid a livelock with the FlatProfiler (see below).
void WatcherThread::stop() {
  // it is ok to take late safepoints here, if needed
  MutexLocker mu(Terminator_lock);
  _should_terminate = true;
  OrderAccess::fence();  // ensure WatcherThread sees update in main loop

  // Unpark the watcher so it notices _should_terminate promptly.
  Thread* watcher = watcher_thread();
  if (watcher != NULL)
    watcher->_SleepEvent->unpark();

  while(watcher_thread() != NULL) {
    // This wait should make safepoint checks, wait without a timeout,
    // and wait as a suspend-equivalent condition.
    //
    // Note: If the FlatProfiler is running, then this thread is waiting
    // for the WatcherThread to terminate and the WatcherThread, via the
    // FlatProfiler task, is waiting for the external suspend request on
    // this thread to complete. wait_for_ext_suspend_completion() will
    // eventually timeout, but that takes time. Making this wait a
    // suspend-equivalent condition solves that timeout problem.
    //
    Terminator_lock->wait(!Mutex::_no_safepoint_check_flag, 0,
                          Mutex::_as_suspend_equivalent_flag);
  }
}
1264 1287
// Print the watcher thread's name followed by the generic Thread summary.
void WatcherThread::print_on(outputStream* st) const {
  st->print("\"%s\" ", name());
  Thread::print_on(st);
  st->cr();
}
1270 1293
1271 1294 // ======= JavaThread ========
1272 1295
1273 1296 // A JavaThread is a normal Java thread
1274 1297
1275 1298 void JavaThread::initialize() {
1276 1299 // Initialize fields
1277 1300
1278 1301 // Set the claimed par_id to -1 (ie not claiming any par_ids)
1279 1302 set_claimed_par_id(-1);
1280 1303
1281 1304 set_saved_exception_pc(NULL);
1282 1305 set_threadObj(NULL);
1283 1306 _anchor.clear();
1284 1307 set_entry_point(NULL);
1285 1308 set_jni_functions(jni_functions());
1286 1309 set_callee_target(NULL);
1287 1310 set_vm_result(NULL);
1288 1311 set_vm_result_2(NULL);
1289 1312 set_vframe_array_head(NULL);
1290 1313 set_vframe_array_last(NULL);
1291 1314 set_deferred_locals(NULL);
1292 1315 set_deopt_mark(NULL);
1293 1316 set_deopt_nmethod(NULL);
1294 1317 clear_must_deopt_id();
1295 1318 set_monitor_chunks(NULL);
1296 1319 set_next(NULL);
1297 1320 set_thread_state(_thread_new);
1298 1321 _terminated = _not_terminated;
1299 1322 _privileged_stack_top = NULL;
1300 1323 _array_for_gc = NULL;
1301 1324 _suspend_equivalent = false;
1302 1325 _in_deopt_handler = 0;
1303 1326 _doing_unsafe_access = false;
1304 1327 _stack_guard_state = stack_guard_unused;
1305 1328 _exception_oop = NULL;
1306 1329 _exception_pc = 0;
1307 1330 _exception_handler_pc = 0;
1308 1331 _is_method_handle_return = 0;
1309 1332 _jvmti_thread_state= NULL;
1310 1333 _should_post_on_exceptions_flag = JNI_FALSE;
1311 1334 _jvmti_get_loaded_classes_closure = NULL;
1312 1335 _interp_only_mode = 0;
1313 1336 _special_runtime_exit_condition = _no_async_condition;
1314 1337 _pending_async_exception = NULL;
1315 1338 _is_compiling = false;
1316 1339 _thread_stat = NULL;
1317 1340 _thread_stat = new ThreadStatistics();
1318 1341 _blocked_on_compilation = false;
1319 1342 _jni_active_critical = 0;
1320 1343 _do_not_unlock_if_synchronized = false;
1321 1344 _cached_monitor_info = NULL;
1322 1345 _parker = Parker::Allocate(this) ;
1323 1346
1324 1347 #ifndef PRODUCT
1325 1348 _jmp_ring_index = 0;
1326 1349 for (int ji = 0 ; ji < jump_ring_buffer_size ; ji++ ) {
1327 1350 record_jump(NULL, NULL, NULL, 0);
1328 1351 }
1329 1352 #endif /* PRODUCT */
1330 1353
1331 1354 set_thread_profiler(NULL);
1332 1355 if (FlatProfiler::is_active()) {
1333 1356 // This is where we would decide to either give each thread it's own profiler
1334 1357 // or use one global one from FlatProfiler,
1335 1358 // or up to some count of the number of profiled threads, etc.
1336 1359 ThreadProfiler* pp = new ThreadProfiler();
1337 1360 pp->engage();
1338 1361 set_thread_profiler(pp);
1339 1362 }
1340 1363
1341 1364 // Setup safepoint state info for this thread
1342 1365 ThreadSafepointState::create(this);
1343 1366
1344 1367 debug_only(_java_call_counter = 0);
1345 1368
1346 1369 // JVMTI PopFrame support
1347 1370 _popframe_condition = popframe_inactive;
1348 1371 _popframe_preserved_args = NULL;
1349 1372 _popframe_preserved_args_size = 0;
1350 1373
1351 1374 pd_initialize();
1352 1375 }
1353 1376
#ifndef SERIALGC
// Shared queue sets for G1 barriers; each JavaThread's per-thread queues
// (constructed below) point at these.
SATBMarkQueueSet JavaThread::_satb_mark_queue_set;
DirtyCardQueueSet JavaThread::_dirty_card_queue_set;
#endif // !SERIALGC
1358 1381
// Constructor used for the main thread and for threads attaching via JNI;
// no entry point and no native thread is created here.
JavaThread::JavaThread(bool is_attaching_via_jni) :
  Thread()
#ifndef SERIALGC
  , _satb_mark_queue(&_satb_mark_queue_set),
  _dirty_card_queue(&_dirty_card_queue_set)
#endif // !SERIALGC
{
  initialize();
  if (is_attaching_via_jni) {
    _jni_attach_state = _attaching_via_jni;
  } else {
    _jni_attach_state = _not_attaching_via_jni;
  }
  assert(_deferred_card_mark.is_empty(), "Default MemRegion ctor");
}
1374 1397
// Re-enable the yellow stack guard zone after a stack overflow has been
// handled. Returns true if the stack is (now) guarded, false if the
// register stack is still overflowed and reguarding must be retried.
bool JavaThread::reguard_stack(address cur_sp) {
  if (_stack_guard_state != stack_guard_yellow_disabled) {
    return true; // Stack already guarded or guard pages not needed.
  }

  if (register_stack_overflow()) {
    // For those architectures which have separate register and
    // memory stacks, we must check the register stack to see if
    // it has overflowed.
    return false;
  }

  // Java code never executes within the yellow zone: the latter is only
  // there to provoke an exception during stack banging.  If java code
  // is executing there, either StackShadowPages should be larger, or
  // some exception code in c1, c2 or the interpreter isn't unwinding
  // when it should.
  guarantee(cur_sp > stack_yellow_zone_base(), "not enough space to reguard - increase StackShadowPages");

  enable_stack_yellow_zone();
  return true;
}
1397 1420
// Convenience overload: reguard using the caller's current stack pointer.
bool JavaThread::reguard_stack(void) {
  return reguard_stack(os::current_stack_pointer());
}
1401 1424
1402 1425
// If the VM has already exited, park this thread forever by blocking on
// Threads_lock (which is never released after exit); must not return.
void JavaThread::block_if_vm_exited() {
  if (_terminated == _vm_exited) {
    // _vm_exited is set at safepoint, and Threads_lock is never released
    // we will block here forever
    Threads_lock->lock_without_safepoint_check();
    ShouldNotReachHere();
  }
}
1411 1434
1412 1435
1413 1436 // Remove this ifdef when C1 is ported to the compiler interface.
1414 1437 static void compiler_thread_entry(JavaThread* thread, TRAPS);
1415 1438
1416 1439 JavaThread::JavaThread(ThreadFunction entry_point, size_t stack_sz) :
1417 1440 Thread()
1418 1441 #ifndef SERIALGC
1419 1442 , _satb_mark_queue(&_satb_mark_queue_set),
1420 1443 _dirty_card_queue(&_dirty_card_queue_set)
1421 1444 #endif // !SERIALGC
1422 1445 {
1423 1446 if (TraceThreadEvents) {
1424 1447 tty->print_cr("creating thread %p", this);
1425 1448 }
1426 1449 initialize();
1427 1450 _jni_attach_state = _not_attaching_via_jni;
1428 1451 set_entry_point(entry_point);
1429 1452 // Create the native thread itself.
1430 1453 // %note runtime_23
1431 1454 os::ThreadType thr_type = os::java_thread;
1432 1455 thr_type = entry_point == &compiler_thread_entry ? os::compiler_thread :
1433 1456 os::java_thread;
1434 1457 os::create_thread(this, thr_type, stack_sz);
1435 1458
1436 1459 // The _osthread may be NULL here because we ran out of memory (too many threads active).
1437 1460 // We need to throw and OutOfMemoryError - however we cannot do this here because the caller
1438 1461 // may hold a lock and all locks must be unlocked before throwing the exception (throwing
1439 1462 // the exception consists of creating the exception object & initializing it, initialization
1440 1463 // will leave the VM via a JavaCall and then all locks must be unlocked).
1441 1464 //
1442 1465 // The thread is still suspended when we reach here. Thread must be explicit started
1443 1466 // by creator! Furthermore, the thread must also explicitly be added to the Threads list
1444 1467 // by calling Threads:add. The reason why this is not done here, is because the thread
1445 1468 // object must be fully initialized (take a look at JVM_Start)
1446 1469 }
1447 1470
// Tear down native-side resources owned by this JavaThread: the JSR-166
// Parker, any leftover deoptimization state, deferred JVMTI locals, the
// safepoint state, the per-thread profiler and statistics. All Java-level
// cleanup happens earlier, in exit().
JavaThread::~JavaThread() {
  if (TraceThreadEvents) {
      tty->print_cr("terminate thread %p", this);
  }

  // JSR166 -- return the parker to the free list
  Parker::Release(_parker);
  _parker = NULL ;

  // Free any remaining  previous UnrollBlock
  vframeArray* old_array = vframe_array_last();

  if (old_array != NULL) {
    Deoptimization::UnrollBlock* old_info = old_array->unroll_block();
    old_array->set_unroll_block(NULL);
    delete old_info;
    delete old_array;
  }

  GrowableArray<jvmtiDeferredLocalVariableSet*>* deferred = deferred_locals();
  if (deferred != NULL) {
    // This can only happen if thread is destroyed before deoptimization occurs.
    assert(deferred->length() != 0, "empty array!");
    do {
      jvmtiDeferredLocalVariableSet* dlv = deferred->at(0);
      deferred->remove_at(0);
      // individual jvmtiDeferredLocalVariableSet are CHeapObj's
      delete dlv;
    } while (deferred->length() != 0);
    delete deferred;
  }

  // All Java related clean up happens in exit
  ThreadSafepointState::destroy(this);
  if (_thread_profiler != NULL) delete _thread_profiler;
  if (_thread_stat != NULL) delete _thread_stat;
}
1485 1508
1486 1509
// The first routine called by a new Java thread
// Sets up TLAB, stack bookkeeping, TLS, and guard pages; transitions to
// _thread_in_vm; posts JVMTI/trace thread-start events; then delegates to
// thread_main_inner(), which runs the entry point and deletes this thread.
void JavaThread::run() {
  // initialize thread-local alloc buffer related fields
  this->initialize_tlab();

  // used to test validity of stack trace backs
  this->record_base_of_stack_pointer();

  // Record real stack base and size.
  this->record_stack_base_and_size();

  // Initialize thread local storage; set before calling MutexLocker
  this->initialize_thread_local_storage();

  this->create_stack_guard_pages();

  this->cache_global_variables();

  // Thread is now sufficiently initialized to be handled by the safepoint code as being
  // in the VM. Change thread state from _thread_new to _thread_in_vm
  ThreadStateTransition::transition_and_fence(this, _thread_new, _thread_in_vm);

  assert(JavaThread::current() == this, "sanity check");
  assert(!Thread::current()->owns_locks(), "sanity check");

  DTRACE_THREAD_PROBE(start, this);

  // This operation might block. We call that after all safepoint checks for a new thread has
  // been completed.
  this->set_active_handles(JNIHandleBlock::allocate_block());

  if (JvmtiExport::should_post_thread_life()) {
    JvmtiExport::post_thread_start(this);
  }

  EVENT_BEGIN(TraceEventThreadStart, event);
  EVENT_COMMIT(event,
     EVENT_SET(event, javalangthread, java_lang_Thread::thread_id(this->threadObj())));

  // We call another function to do the rest so we are sure that the stack addresses used
  // from there will be lower than the stack base just computed
  thread_main_inner();

  // Note, thread is no longer valid at this point!
}
1532 1555
1533 1556
// Runs the thread's entry point (unless a pending exception or a stillborn
// java.lang.Thread makes it moot), then performs thread exit and deletes
// this JavaThread. Never use 'this' after the call to thread_main_inner().
void JavaThread::thread_main_inner() {
  assert(JavaThread::current() == this, "sanity check");
  assert(this->threadObj() != NULL, "just checking");

  // Execute thread entry point unless this thread has a pending exception
  // or has been stopped before starting.
  // Note: Due to JVM_StopThread we can have pending exceptions already!
  if (!this->has_pending_exception() &&
      !java_lang_Thread::is_stillborn(this->threadObj())) {
    {
      ResourceMark rm(this);
      this->set_native_thread_name(this->get_thread_name());
    }
    HandleMark hm(this);
    this->entry_point()(this, this);
  }

  DTRACE_THREAD_PROBE(stop, this);

  this->exit(false);
  delete this;
}
1556 1579
1557 1580
// Wake up anyone blocked in java.lang.Thread.join() on this (exiting) thread:
// mark the Thread object TERMINATED, detach the native thread (so isAlive()
// returns false), and notify all waiters on the Thread object's monitor.
static void ensure_join(JavaThread* thread) {
  // We do not need to grab the Threads_lock, since we are operating on ourself.
  Handle threadObj(thread, thread->threadObj());
  assert(threadObj.not_null(), "java thread object must exist");
  ObjectLocker lock(threadObj, thread);
  // Ignore pending exception (ThreadDeath), since we are exiting anyway
  thread->clear_pending_exception();
  // Thread is exiting. So set thread_status field in  java.lang.Thread class to TERMINATED.
  java_lang_Thread::set_thread_status(threadObj(), java_lang_Thread::TERMINATED);
  // Clear the native thread instance - this makes isAlive return false and allows the join()
  // to complete once we've done the notify_all below
  java_lang_Thread::set_thread(threadObj(), NULL);
  lock.notify_all(thread);
  // Ignore pending exception (ThreadDeath), since we are exiting anyway
  thread->clear_pending_exception();
}
1574 1597
1575 1598
// For any new cleanup additions, please check to see if they need to be applied to
// cleanup_failed_attach_current_thread as well.
//
// Tears down a dying JavaThread: dispatches any uncaught exception, posts
// thread-end events, calls Thread.exit(), honors pending external suspend
// requests, notifies join()ers, releases JNI handle blocks and (on JNI
// detach) owned monitors, retires the TLAB, flushes G1 buffers and finally
// removes the thread from the Threads list.
//   destroy_vm - true when called by the thread destroying the VM (in that
//                case before_exit() has already posted JVMTI THREAD_END).
//   exit_type  - distinguishes a normal exit from a JNI detach.
void JavaThread::exit(bool destroy_vm, ExitType exit_type) {
  assert(this == JavaThread::current(), "thread consistency check");
  if (!InitializeJavaLangSystem) return;

  HandleMark hm(this);
  // Capture and clear any pending exception; it becomes the "uncaught
  // exception" reported below.
  Handle uncaught_exception(this, this->pending_exception());
  this->clear_pending_exception();
  Handle threadObj(this, this->threadObj());
  assert(threadObj.not_null(), "Java thread object should be created");

  if (get_thread_profiler() != NULL) {
    get_thread_profiler()->disengage();
    ResourceMark rm;
    get_thread_profiler()->print(get_thread_name());
  }


  // FIXIT: This code should be moved into else part, when reliable 1.2/1.3 check is in place
  {
    // NOTE(review): this scope takes an exception mark and immediately clears
    // it with no work in between — looks like a historical placeholder; TODO
    // confirm it can be removed.
    EXCEPTION_MARK;

    CLEAR_PENDING_EXCEPTION;
  }
  // FIXIT: The is_null check is only so it works better on JDK1.2 VM's. This
  // has to be fixed by a runtime query method
  if (!destroy_vm || JDK_Version::is_jdk12x_version()) {
    // JSR-166: change call from ThreadGroup.uncaughtException to
    // java.lang.Thread.dispatchUncaughtException
    if (uncaught_exception.not_null()) {
      Handle group(this, java_lang_Thread::threadGroup(threadObj()));
      {
        EXCEPTION_MARK;
        // Check if the method Thread.dispatchUncaughtException() exists. If so
        // call it. Otherwise we have an older library without the JSR-166 changes,
        // so call ThreadGroup.uncaughtException()
        KlassHandle recvrKlass(THREAD, threadObj->klass());
        CallInfo callinfo;
        KlassHandle thread_klass(THREAD, SystemDictionary::Thread_klass());
        LinkResolver::resolve_virtual_call(callinfo, threadObj, recvrKlass, thread_klass,
                                           vmSymbols::dispatchUncaughtException_name(),
                                           vmSymbols::throwable_void_signature(),
                                           KlassHandle(), false, false, THREAD);
        // Resolution failure just means the method is absent; fall back below.
        CLEAR_PENDING_EXCEPTION;
        methodHandle method = callinfo.selected_method();
        if (method.not_null()) {
          JavaValue result(T_VOID);
          JavaCalls::call_virtual(&result,
                                  threadObj, thread_klass,
                                  vmSymbols::dispatchUncaughtException_name(),
                                  vmSymbols::throwable_void_signature(),
                                  uncaught_exception,
                                  THREAD);
        } else {
          KlassHandle thread_group(THREAD, SystemDictionary::ThreadGroup_klass());
          JavaValue result(T_VOID);
          JavaCalls::call_virtual(&result,
                                  group, thread_group,
                                  vmSymbols::uncaughtException_name(),
                                  vmSymbols::thread_throwable_void_signature(),
                                  threadObj,           // Arg 1
                                  uncaught_exception,  // Arg 2
                                  THREAD);
        }
        // A handler that itself threw is reported to stderr and dropped.
        if (HAS_PENDING_EXCEPTION) {
          ResourceMark rm(this);
          jio_fprintf(defaultStream::error_stream(),
                      "\nException: %s thrown from the UncaughtExceptionHandler"
                      " in thread \"%s\"\n",
                      Klass::cast(pending_exception()->klass())->external_name(),
                      get_thread_name());
          CLEAR_PENDING_EXCEPTION;
        }
      }
    }

    // Called before the java thread exit since we want to read info
    // from java_lang_Thread object
    EVENT_BEGIN(TraceEventThreadEnd, event);
    EVENT_COMMIT(event,
        EVENT_SET(event, javalangthread, java_lang_Thread::thread_id(this->threadObj())));

    // Call after last event on thread
    EVENT_THREAD_EXIT(this);

    // Call Thread.exit(). We try 3 times in case we got another Thread.stop during
    // the execution of the method. If that is not enough, then we don't really care. Thread.stop
    // is deprecated anyhow.
    { int count = 3;
      while (java_lang_Thread::threadGroup(threadObj()) != NULL && (count-- > 0)) {
        EXCEPTION_MARK;
        JavaValue result(T_VOID);
        KlassHandle thread_klass(THREAD, SystemDictionary::Thread_klass());
        JavaCalls::call_virtual(&result,
                                threadObj, thread_klass,
                                vmSymbols::exit_method_name(),
                                vmSymbols::void_method_signature(),
                                THREAD);
        CLEAR_PENDING_EXCEPTION;
      }
    }

    // notify JVMTI
    if (JvmtiExport::should_post_thread_life()) {
      JvmtiExport::post_thread_end(this);
    }

    // We have notified the agents that we are exiting, before we go on,
    // we must check for a pending external suspend request and honor it
    // in order to not surprise the thread that made the suspend request.
    while (true) {
      {
        MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
        if (!is_external_suspend()) {
          set_terminated(_thread_exiting);
          ThreadService::current_thread_exiting(this);
          break;
        }
        // Implied else:
        // Things get a little tricky here. We have a pending external
        // suspend request, but we are holding the SR_lock so we
        // can't just self-suspend. So we temporarily drop the lock
        // and then self-suspend.
      }

      ThreadBlockInVM tbivm(this);
      java_suspend_self();

      // We're done with this suspend request, but we have to loop around
      // and check again. Eventually we will get SR_lock without a pending
      // external suspend request and will be able to mark ourselves as
      // exiting.
    }
    // no more external suspends are allowed at this point
  } else {
    // before_exit() has already posted JVMTI THREAD_END events
  }

  // Notify waiters on thread object. This has to be done after exit() is called
  // on the thread (if the thread is the last thread in a daemon ThreadGroup the
  // group should have the destroyed bit set before waiters are notified).
  ensure_join(this);
  assert(!this->has_pending_exception(), "ensure_join should have cleared");

  // 6282335 JNI DetachCurrentThread spec states that all Java monitors
  // held by this thread must be released. A detach operation must only
  // get here if there are no Java frames on the stack. Therefore, any
  // owned monitors at this point MUST be JNI-acquired monitors which are
  // pre-inflated and in the monitor cache.
  //
  // ensure_join() ignores IllegalThreadStateExceptions, and so does this.
  if (exit_type == jni_detach && JNIDetachReleasesMonitors) {
    assert(!this->has_last_Java_frame(), "detaching with Java frames?");
    ObjectSynchronizer::release_monitors_owned_by_thread(this);
    assert(!this->has_pending_exception(), "release_monitors should have cleared");
  }

  // These things need to be done while we are still a Java Thread. Make sure that thread
  // is in a consistent state, in case GC happens
  assert(_privileged_stack_top == NULL, "must be NULL when we get here");

  if (active_handles() != NULL) {
    JNIHandleBlock* block = active_handles();
    set_active_handles(NULL);
    JNIHandleBlock::release_block(block);
  }

  if (free_handle_block() != NULL) {
    JNIHandleBlock* block = free_handle_block();
    set_free_handle_block(NULL);
    JNIHandleBlock::release_block(block);
  }

  // These have to be removed while this is still a valid thread.
  remove_stack_guard_pages();

  if (UseTLAB) {
    tlab().make_parsable(true);  // retire TLAB
  }

  if (JvmtiEnv::environments_might_exist()) {
    JvmtiExport::cleanup_thread(this);
  }

#ifndef SERIALGC
  // We must flush G1-related buffers before removing a thread from
  // the list of active threads.
  if (UseG1GC) {
    flush_barrier_queues();
  }
#endif

  // Remove from list of active threads list, and notify VM thread if we are the last non-daemon thread
  Threads::remove(this);
}
1772 1795
1773 1796 #ifndef SERIALGC
1774 1797 // Flush G1-related queues.
1775 1798 void JavaThread::flush_barrier_queues() {
1776 1799 satb_mark_queue().flush();
1777 1800 dirty_card_queue().flush();
1778 1801 }
1779 1802
1780 1803 void JavaThread::initialize_queues() {
1781 1804 assert(!SafepointSynchronize::is_at_safepoint(),
1782 1805 "we should not be at a safepoint");
1783 1806
1784 1807 ObjPtrQueue& satb_queue = satb_mark_queue();
1785 1808 SATBMarkQueueSet& satb_queue_set = satb_mark_queue_set();
1786 1809 // The SATB queue should have been constructed with its active
1787 1810 // field set to false.
1788 1811 assert(!satb_queue.is_active(), "SATB queue should not be active");
1789 1812 assert(satb_queue.is_empty(), "SATB queue should be empty");
1790 1813 // If we are creating the thread during a marking cycle, we should
1791 1814 // set the active field of the SATB queue to true.
1792 1815 if (satb_queue_set.is_active()) {
1793 1816 satb_queue.set_active(true);
1794 1817 }
1795 1818
1796 1819 DirtyCardQueue& dirty_queue = dirty_card_queue();
1797 1820 // The dirty card queue should have been constructed with its
1798 1821 // active field set to true.
1799 1822 assert(dirty_queue.is_active(), "dirty card queue should be active");
1800 1823 }
1801 1824 #endif // !SERIALGC
1802 1825
// Cleanup for a JavaThread whose JNI AttachCurrentThread attempt failed.
// Mirrors the tail of JavaThread::exit(): release JNI handle blocks, remove
// stack guard pages, retire the TLAB, flush G1 buffers, remove the thread
// from the Threads list and delete it. Keep in sync with JavaThread::exit().
void JavaThread::cleanup_failed_attach_current_thread() {
  if (get_thread_profiler() != NULL) {
    get_thread_profiler()->disengage();
    ResourceMark rm;
    get_thread_profiler()->print(get_thread_name());
  }

  if (active_handles() != NULL) {
    JNIHandleBlock* block = active_handles();
    set_active_handles(NULL);
    JNIHandleBlock::release_block(block);
  }

  if (free_handle_block() != NULL) {
    JNIHandleBlock* block = free_handle_block();
    set_free_handle_block(NULL);
    JNIHandleBlock::release_block(block);
  }

  // These have to be removed while this is still a valid thread.
  remove_stack_guard_pages();

  if (UseTLAB) {
    tlab().make_parsable(true);  // retire TLAB, if any
  }

#ifndef SERIALGC
  if (UseG1GC) {
    flush_barrier_queues();
  }
#endif

  // 'this' is invalid after these two calls.
  Threads::remove(this);
  delete this;
}
1838 1861
1839 1862
1840 1863
1841 1864
1842 1865 JavaThread* JavaThread::active() {
1843 1866 Thread* thread = ThreadLocalStorage::thread();
1844 1867 assert(thread != NULL, "just checking");
1845 1868 if (thread->is_Java_thread()) {
1846 1869 return (JavaThread*) thread;
1847 1870 } else {
1848 1871 assert(thread->is_VM_thread(), "this must be a vm thread");
1849 1872 VM_Operation* op = ((VMThread*) thread)->vm_operation();
1850 1873 JavaThread *ret=op == NULL ? NULL : (JavaThread *)op->calling_thread();
1851 1874 assert(ret->is_Java_thread(), "must be a Java thread");
1852 1875 return ret;
1853 1876 }
1854 1877 }
1855 1878
1856 1879 bool JavaThread::is_lock_owned(address adr) const {
1857 1880 if (Thread::is_lock_owned(adr)) return true;
1858 1881
1859 1882 for (MonitorChunk* chunk = monitor_chunks(); chunk != NULL; chunk = chunk->next()) {
1860 1883 if (chunk->contains(adr)) return true;
1861 1884 }
1862 1885
1863 1886 return false;
1864 1887 }
1865 1888
1866 1889
1867 1890 void JavaThread::add_monitor_chunk(MonitorChunk* chunk) {
1868 1891 chunk->set_next(monitor_chunks());
1869 1892 set_monitor_chunks(chunk);
1870 1893 }
1871 1894
1872 1895 void JavaThread::remove_monitor_chunk(MonitorChunk* chunk) {
1873 1896 guarantee(monitor_chunks() != NULL, "must be non empty");
1874 1897 if (monitor_chunks() == chunk) {
1875 1898 set_monitor_chunks(chunk->next());
1876 1899 } else {
1877 1900 MonitorChunk* prev = monitor_chunks();
1878 1901 while (prev->next() != chunk) prev = prev->next();
1879 1902 prev->set_next(chunk->next());
1880 1903 }
1881 1904 }
1882 1905
1883 1906 // JVM support.
1884 1907
// Note: this function shouldn't block if it's called in
// _thread_in_native_trans state (such as from
// check_special_condition_for_native_trans()).
//
// Promote a pending asynchronous exception (e.g. installed by
// send_thread_stop()) to this thread's regular pending exception, deferring
// it where delivery would clobber live registers. When check_unsafe_error
// is true, an _async_unsafe_access_error condition is converted into a
// java.lang.InternalError thrown in the appropriate thread state.
void JavaThread::check_and_handle_async_exceptions(bool check_unsafe_error) {

  if (has_last_Java_frame() && has_async_condition()) {
    // If we are at a polling page safepoint (not a poll return)
    // then we must defer async exception because live registers
    // will be clobbered by the exception path. Poll return is
    // ok because the call we are returning from already collides
    // with exception handling registers and so there is no issue.
    // (The exception handling path kills call result registers but
    //  this is ok since the exception kills the result anyway).

    if (is_at_poll_safepoint()) {
      // if the code we are returning to has deoptimized we must defer
      // the exception otherwise live registers get clobbered on the
      // exception path before deoptimization is able to retrieve them.
      //
      RegisterMap map(this, false);
      frame caller_fr = last_frame().sender(&map);
      assert(caller_fr.is_compiled_frame(), "what?");
      if (caller_fr.is_deoptimized_frame()) {
        if (TraceExceptions) {
          ResourceMark rm;
          tty->print_cr("deferred async exception at compiled safepoint");
        }
        return;
      }
    }
  }

  JavaThread::AsyncRequests condition = clear_special_runtime_exit_condition();
  if (condition == _no_async_condition) {
    // Conditions have changed since has_special_runtime_exit_condition()
    // was called:
    // - if we were here only because of an external suspend request,
    //   then that was taken care of above (or cancelled) so we are done
    // - if we were here because of another async request, then it has
    //   been cleared between the has_special_runtime_exit_condition()
    //   and now so again we are done
    return;
  }

  // Check for pending async. exception
  if (_pending_async_exception != NULL) {
    // Only overwrite an already pending exception, if it is not a threadDeath.
    if (!has_pending_exception() || !pending_exception()->is_a(SystemDictionary::ThreadDeath_klass())) {

      // We cannot call Exceptions::_throw(...) here because we cannot block
      set_pending_exception(_pending_async_exception, __FILE__, __LINE__);

      if (TraceExceptions) {
        ResourceMark rm;
        tty->print("Async. exception installed at runtime exit (" INTPTR_FORMAT ")", this);
        if (has_last_Java_frame() ) {
          frame f = last_frame();
          tty->print(" (pc: " INTPTR_FORMAT " sp: " INTPTR_FORMAT " )", f.pc(), f.sp());
        }
        tty->print_cr(" of type: %s", instanceKlass::cast(_pending_async_exception->klass())->external_name());
      }
      _pending_async_exception = NULL;
      clear_has_async_exception();
    }
  }

  if (check_unsafe_error &&
      condition == _async_unsafe_access_error && !has_pending_exception()) {
    condition = _no_async_condition;  // done
    // NOTE(review): the cases below do not 'break' — THROW_MSG appears to
    // return from this function; confirm against the macro definition.
    switch (thread_state()) {
    case _thread_in_vm:
      {
        JavaThread* THREAD = this;
        THROW_MSG(vmSymbols::java_lang_InternalError(), "a fault occurred in an unsafe memory access operation");
      }
    case _thread_in_native:
      {
        ThreadInVMfromNative tiv(this);
        JavaThread* THREAD = this;
        THROW_MSG(vmSymbols::java_lang_InternalError(), "a fault occurred in an unsafe memory access operation");
      }
    case _thread_in_Java:
      {
        ThreadInVMfromJava tiv(this);
        JavaThread* THREAD = this;
        THROW_MSG(vmSymbols::java_lang_InternalError(), "a fault occurred in a recent unsafe memory access operation in compiled Java code");
      }
    default:
      ShouldNotReachHere();
    }
  }

  assert(condition == _no_async_condition || has_pending_exception() ||
         (!check_unsafe_error && condition == _async_unsafe_access_error),
         "must have handled the async condition, if no exception");
}
1981 2004
// Handle a pending external suspend request (self-suspending if one is
// present) and, when check_asyncs is true, any pending asynchronous
// exceptions. Called on runtime exit paths; internal suspend requests do
// not come through here.
void JavaThread::handle_special_runtime_exit_condition(bool check_asyncs) {
  //
  // Check for pending external suspend. Internal suspend requests do
  // not use handle_special_runtime_exit_condition().
  // If JNIEnv proxies are allowed, don't self-suspend if the target
  // thread is not the current thread. In older versions of jdbx, jdbx
  // threads could call into the VM with another thread's JNIEnv so we
  // can be here operating on behalf of a suspended thread (4432884).
  bool do_self_suspend = is_external_suspend_with_lock();
  if (do_self_suspend && (!AllowJNIEnvProxy || this == JavaThread::current())) {
    //
    // Because thread is external suspended the safepoint code will count
    // thread as at a safepoint. This can be odd because we can be here
    // as _thread_in_Java which would normally transition to _thread_blocked
    // at a safepoint. We would like to mark the thread as _thread_blocked
    // before calling java_suspend_self like all other callers of it but
    // we must then observe proper safepoint protocol. (We can't leave
    // _thread_blocked with a safepoint in progress). However we can be
    // here as _thread_in_native_trans so we can't use a normal transition
    // constructor/destructor pair because they assert on that type of
    // transition. We could do something like:
    //
    // JavaThreadState state = thread_state();
    // set_thread_state(_thread_in_vm);
    // {
    //   ThreadBlockInVM tbivm(this);
    //   java_suspend_self()
    // }
    // set_thread_state(_thread_in_vm_trans);
    // if (safepoint) block;
    // set_thread_state(state);
    //
    // but that is pretty messy. Instead we just go with the way the
    // code has worked before and note that this is the only path to
    // java_suspend_self that doesn't put the thread in _thread_blocked
    // mode.

    // Stack must be walkable before we park, so suspenders can walk it.
    frame_anchor()->make_walkable(this);
    java_suspend_self();

    // We might be here for reasons in addition to the self-suspend request
    // so check for other async requests.
  }

  if (check_asyncs) {
    check_and_handle_async_exceptions();
  }
}
2030 2053
2031 2054 void JavaThread::send_thread_stop(oop java_throwable) {
2032 2055 assert(Thread::current()->is_VM_thread(), "should be in the vm thread");
2033 2056 assert(Threads_lock->is_locked(), "Threads_lock should be locked by safepoint code");
2034 2057 assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");
2035 2058
2036 2059 // Do not throw asynchronous exceptions against the compiler thread
2037 2060 // (the compiler thread should not be a Java thread -- fix in 1.4.2)
2038 2061 if (is_Compiler_thread()) return;
2039 2062
2040 2063 {
2041 2064 // Actually throw the Throwable against the target Thread - however
2042 2065 // only if there is no thread death exception installed already.
2043 2066 if (_pending_async_exception == NULL || !_pending_async_exception->is_a(SystemDictionary::ThreadDeath_klass())) {
2044 2067 // If the topmost frame is a runtime stub, then we are calling into
2045 2068 // OptoRuntime from compiled code. Some runtime stubs (new, monitor_exit..)
2046 2069 // must deoptimize the caller before continuing, as the compiled exception handler table
2047 2070 // may not be valid
2048 2071 if (has_last_Java_frame()) {
2049 2072 frame f = last_frame();
2050 2073 if (f.is_runtime_frame() || f.is_safepoint_blob_frame()) {
2051 2074 // BiasedLocking needs an updated RegisterMap for the revoke monitors pass
2052 2075 RegisterMap reg_map(this, UseBiasedLocking);
2053 2076 frame compiled_frame = f.sender(®_map);
2054 2077 if (compiled_frame.can_be_deoptimized()) {
2055 2078 Deoptimization::deoptimize(this, compiled_frame, ®_map);
2056 2079 }
2057 2080 }
2058 2081 }
2059 2082
2060 2083 // Set async. pending exception in thread.
2061 2084 set_pending_async_exception(java_throwable);
2062 2085
2063 2086 if (TraceExceptions) {
2064 2087 ResourceMark rm;
2065 2088 tty->print_cr("Pending Async. exception installed of type: %s", instanceKlass::cast(_pending_async_exception->klass())->external_name());
2066 2089 }
2067 2090 // for AbortVMOnException flag
2068 2091 NOT_PRODUCT(Exceptions::debug_check_abort(instanceKlass::cast(_pending_async_exception->klass())->external_name()));
2069 2092 }
2070 2093 }
2071 2094
2072 2095
2073 2096 // Interrupt thread so it will wake up from a potential wait()
2074 2097 Thread::interrupt(this);
2075 2098 }
2076 2099
// External suspension mechanism.
//
// Tell the VM to suspend a thread when ever it knows that it does not hold on
// to any VM_locks and it is at a transition
// Self-suspension will happen on the transition out of the vm.
// Catch "this" coming in from JNIEnv pointers when the thread has been freed
//
// Guarantees on return:
//   + Target thread will not execute any new bytecode (that's why we need to
//     force a safepoint)
//   + Target thread will not enter any new monitors
//
void JavaThread::java_suspend() {
  // Bail if the thread is gone, exiting, or has no Java mirror.
  { MutexLocker mu(Threads_lock);
    if (!Threads::includes(this) || is_exiting() || this->threadObj() == NULL) {
       return;
    }
  }

  { MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
    if (!is_external_suspend()) {
      // a racing resume has cancelled us; bail out now
      return;
    }

    // suspend is done
    uint32_t debug_bits = 0;
    // Warning: is_ext_suspend_completed() may temporarily drop the
    // SR_lock to allow the thread to reach a stable thread state if
    // it is currently in a transient thread state.
    if (is_ext_suspend_completed(false /* !called_by_wait */,
                                 SuspendRetryDelay, &debug_bits) ) {
      return;
    }
  }

  // Not yet suspended: force a safepoint so the target reaches a point
  // where it will notice the request (see header comment above).
  VM_ForceSafepoint vm_suspend;
  VMThread::execute(&vm_suspend);
}
2116 2139
// Part II of external suspension.
// A JavaThread self suspends when it detects a pending external suspend
// request. This is usually on transitions. It is also done in places
// where continuing to the next transition would surprise the caller,
// e.g., monitor entry.
//
// Returns the number of times that the thread self-suspended.
//
// Note: DO NOT call java_suspend_self() when you just want to block current
//       thread. java_suspend_self() is the second stage of cooperative
//       suspension for external suspend requests and should only be used
//       to complete an external suspend request.
//
int JavaThread::java_suspend_self() {
  int ret = 0;

  // we are in the process of exiting so don't suspend
  if (is_exiting()) {
     clear_external_suspend();
     return ret;
  }

  assert(_anchor.walkable() ||
         (is_Java_thread() && !((JavaThread*)this)->has_last_Java_frame()),
         "must have walkable stack");

  MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);

  assert(!this->is_ext_suspended(),
         "a thread trying to self-suspend should not already be suspended");

  if (this->is_suspend_equivalent()) {
    // If we are self-suspending as a result of the lifting of a
    // suspend equivalent condition, then the suspend_equivalent
    // flag is not cleared until we set the ext_suspended flag so
    // that wait_for_ext_suspend_completion() returns consistent
    // results.
    this->clear_suspend_equivalent();
  }

  // A racing resume may have cancelled us before we grabbed SR_lock
  // above. Or another external suspend request could be waiting for us
  // by the time we return from SR_lock()->wait(). The thread
  // that requested the suspension may already be trying to walk our
  // stack and if we return now, we can change the stack out from under
  // it. This would be a "bad thing (TM)" and cause the stack walker
  // to crash. We stay self-suspended until there are no more pending
  // external suspend requests.
  while (is_external_suspend()) {
    ret++;
    this->set_ext_suspended();

    // _ext_suspended flag is cleared by java_resume()
    while (is_ext_suspended()) {
      // Park on SR_lock until java_resume() notifies us.
      this->SR_lock()->wait(Mutex::_no_safepoint_check_flag);
    }
  }

  return ret;
}
2177 2200
#ifdef ASSERT
// verify the JavaThread has not yet been published in the Threads::list, and
// hence doesn't need protection from concurrent access at this stage
void JavaThread::verify_not_published() {
  if (Threads_lock->owned_by_self()) {
    // Caller already holds the lock; inspect the list directly.
    assert( !Threads::includes(this),
            "java thread shouldn't have been published yet!");
  } else {
    // Take the lock ourselves before peeking at the list.
    MutexLockerEx ml(Threads_lock, Mutex::_no_safepoint_check_flag);
    assert( !Threads::includes(this),
            "java thread shouldn't have been published yet!");
  }
}
#endif
2193 2216
// Slow path when the native==>VM/Java barriers detect a safepoint is in
// progress or when _suspend_flags is non-zero.
// Current thread needs to self-suspend if there is a suspend request and/or
// block if a safepoint is in progress.
// Async exception ISN'T checked.
// Note only the ThreadInVMfromNative transition can call this function
// directly and when thread state is _thread_in_native_trans
void JavaThread::check_safepoint_and_suspend_for_native_trans(JavaThread *thread) {
  assert(thread->thread_state() == _thread_in_native_trans, "wrong state");

  JavaThread *curJT = JavaThread::current();
  bool do_self_suspend = thread->is_external_suspend();

  assert(!curJT->has_last_Java_frame() || curJT->frame_anchor()->walkable(), "Unwalkable stack in native->vm transition");

  // If JNIEnv proxies are allowed, don't self-suspend if the target
  // thread is not the current thread. In older versions of jdbx, jdbx
  // threads could call into the VM with another thread's JNIEnv so we
  // can be here operating on behalf of a suspended thread (4432884).
  if (do_self_suspend && (!AllowJNIEnvProxy || curJT == thread)) {
    JavaThreadState state = thread->thread_state();

    // We mark this thread_blocked state as a suspend-equivalent so
    // that a caller to is_ext_suspend_completed() won't be confused.
    // The suspend-equivalent state is cleared by java_suspend_self().
    thread->set_suspend_equivalent();

    // If the safepoint code sees the _thread_in_native_trans state, it will
    // wait until the thread changes to other thread state. There is no
    // guarantee on how soon we can obtain the SR_lock and complete the
    // self-suspend request. It would be a bad idea to let safepoint wait for
    // too long. Temporarily change the state to _thread_blocked to
    // let the VM thread know that this thread is ready for GC. The problem
    // of changing thread state is that safepoint could happen just after
    // java_suspend_self() returns after being resumed, and VM thread will
    // see the _thread_blocked state. We must check for safepoint
    // after restoring the state and make sure we won't leave while a safepoint
    // is in progress.
    thread->set_thread_state(_thread_blocked);
    thread->java_suspend_self();
    thread->set_thread_state(state);
    // Make sure new state is seen by VM thread
    if (os::is_MP()) {
      if (UseMembar) {
        // Force a fence between the write above and read below
        OrderAccess::fence();
      } else {
        // Must use this rather than serialization page in particular on Windows
        InterfaceSupport::serialize_memory(thread);
      }
    }
  }

  if (SafepointSynchronize::do_call_back()) {
    // If we are safepointing, then block the caller which may not be
    // the same as the target thread (see above).
    SafepointSynchronize::block(curJT);
  }

  if (thread->is_deopt_suspend()) {
    thread->clear_deopt_suspend();
    RegisterMap map(thread, false);
    frame f = thread->last_frame();
    // Walk down the stack to the frame that was marked for deoptimization.
    while ( f.id() != thread->must_deopt_id() && ! f.is_first_frame()) {
      f = f.sender(&map);
    }
    if (f.id() == thread->must_deopt_id()) {
      thread->clear_must_deopt_id();
      f.deoptimize(thread);
    } else {
      fatal("missed deoptimization!");
    }
  }
}
2268 2291
2269 2292 // Slow path when the native==>VM/Java barriers detect a safepoint is in
2270 2293 // progress or when _suspend_flags is non-zero.
2271 2294 // Current thread needs to self-suspend if there is a suspend request and/or
2272 2295 // block if a safepoint is in progress.
2273 2296 // Also check for pending async exception (not including unsafe access error).
2274 2297 // Note only the native==>VM/Java barriers can call this function and when
2275 2298 // thread state is _thread_in_native_trans.
2276 2299 void JavaThread::check_special_condition_for_native_trans(JavaThread *thread) {
2277 2300 check_safepoint_and_suspend_for_native_trans(thread);
2278 2301
2279 2302 if (thread->has_async_exception()) {
2280 2303 // We are in _thread_in_native_trans state, don't handle unsafe
2281 2304 // access error since that may block.
2282 2305 thread->check_and_handle_async_exceptions(false);
2283 2306 }
2284 2307 }
2285 2308
// This is a variant of the normal
// check_special_condition_for_native_trans with slightly different
// semantics for use by critical native wrappers. It does all the
// normal checks but also performs the transition back into
// thread_in_Java state. This is required so that critical natives
// can potentially block and perform a GC if they are the last thread
// exiting the GC_locker.
void JavaThread::check_special_condition_for_native_trans_and_transition(JavaThread *thread) {
  check_special_condition_for_native_trans(thread);

  // Finish the transition
  thread->set_thread_state(_thread_in_Java);

  // Perform a deferred GC_locker unlock if one was requested while this
  // thread was in the critical native.
  if (thread->do_critical_native_unlock()) {
    ThreadInVMfromJavaNoAsyncException tiv(thread);
    GC_locker::unlock_critical(thread);
    thread->clear_critical_native_unlock();
  }
}
2305 2328
// We need to guarantee the Threads_lock here, since resumes are not
// allowed during safepoint synchronization
// Can only resume from an external suspension
void JavaThread::java_resume() {
  assert_locked_or_safepoint(Threads_lock);

  // Sanity check: thread is gone, has started exiting or the thread
  // was not externally suspended.
  if (!Threads::includes(this) || is_exiting() || !is_external_suspend()) {
    return;
  }

  // SR_lock protects the suspend flags; no safepoint check since we
  // already hold (or are at) the Threads_lock/safepoint.
  MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);

  clear_external_suspend();

  // Wake the thread only if it actually parked itself in the suspended
  // state; otherwise clearing the request flag is sufficient.
  if (is_ext_suspended()) {
    clear_ext_suspended();
    SR_lock()->notify_all();
  }
}
2327 2350
// Establish the yellow+red guard zone at the low (grows-down) end of this
// thread's stack.  No-op if the platform does not use guard pages or they
// are already set up.
void JavaThread::create_stack_guard_pages() {
  if (! os::uses_stack_guard_pages() || _stack_guard_state != stack_guard_unused) return;
  address low_addr = stack_base() - stack_size();
  size_t len = (StackYellowPages + StackRedPages) * os::vm_page_size();

  // Some platforms must explicitly allocate (commit) the guard pages
  // before they can be protected; others only need the protection step.
  int allocate = os::allocate_stack_guard_pages();
  // warning("Guarding at " PTR_FORMAT " for len " SIZE_FORMAT "\n", low_addr, len);

  if (allocate && !os::create_stack_guard_pages((char *) low_addr, len)) {
    warning("Attempt to allocate stack guard pages failed.");
    return;
  }

  if (os::guard_memory((char *) low_addr, len)) {
    _stack_guard_state = stack_guard_enabled;
  } else {
    warning("Attempt to protect stack guard pages failed.");
    // Roll back the commit so we do not leave half-initialized pages.
    if (os::uncommit_memory((char *) low_addr, len)) {
      warning("Attempt to deallocate stack guard pages failed.");
    }
  }
}
2350 2373
2351 2374 void JavaThread::remove_stack_guard_pages() {
2352 2375 if (_stack_guard_state == stack_guard_unused) return;
2353 2376 address low_addr = stack_base() - stack_size();
2354 2377 size_t len = (StackYellowPages + StackRedPages) * os::vm_page_size();
2355 2378
2356 2379 if (os::allocate_stack_guard_pages()) {
2357 2380 if (os::remove_stack_guard_pages((char *) low_addr, len)) {
2358 2381 _stack_guard_state = stack_guard_unused;
2359 2382 } else {
2360 2383 warning("Attempt to deallocate stack guard pages failed.");
2361 2384 }
2362 2385 } else {
2363 2386 if (_stack_guard_state == stack_guard_unused) return;
2364 2387 if (os::unguard_memory((char *) low_addr, len)) {
2365 2388 _stack_guard_state = stack_guard_unused;
2366 2389 } else {
2367 2390 warning("Attempt to unprotect stack guard pages failed.");
2368 2391 }
2369 2392 }
2370 2393 }
2371 2394
// Re-arm the yellow zone after a stack-overflow handler has consumed it.
// Precondition: guard pages are in use and the yellow zone is currently
// disabled (asserted below).
void JavaThread::enable_stack_yellow_zone() {
  assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
  assert(_stack_guard_state != stack_guard_enabled, "already enabled");

  // The base notation is from the stacks point of view, growing downward.
  // We need to adjust it to work correctly with guard_memory()
  address base = stack_yellow_zone_base() - stack_yellow_zone_size();

  guarantee(base < stack_base(),"Error calculating stack yellow zone");
  guarantee(base < os::current_stack_pointer(),"Error calculating stack yellow zone");

  if (os::guard_memory((char *) base, stack_yellow_zone_size())) {
    _stack_guard_state = stack_guard_enabled;
  } else {
    warning("Attempt to guard stack yellow zone failed.");
  }
  // Some platforms (e.g. ia64) keep a separate register stack guard.
  enable_register_stack_guard();
}
2390 2413
2391 2414 void JavaThread::disable_stack_yellow_zone() {
2392 2415 assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
2393 2416 assert(_stack_guard_state != stack_guard_yellow_disabled, "already disabled");
2394 2417
2395 2418 // Simply return if called for a thread that does not use guard pages.
2396 2419 if (_stack_guard_state == stack_guard_unused) return;
2397 2420
2398 2421 // The base notation is from the stacks point of view, growing downward.
2399 2422 // We need to adjust it to work correctly with guard_memory()
2400 2423 address base = stack_yellow_zone_base() - stack_yellow_zone_size();
2401 2424
2402 2425 if (os::unguard_memory((char *)base, stack_yellow_zone_size())) {
2403 2426 _stack_guard_state = stack_guard_yellow_disabled;
2404 2427 } else {
2405 2428 warning("Attempt to unguard stack yellow zone failed.");
2406 2429 }
2407 2430 disable_register_stack_guard();
2408 2431 }
2409 2432
2410 2433 void JavaThread::enable_stack_red_zone() {
2411 2434 // The base notation is from the stacks point of view, growing downward.
2412 2435 // We need to adjust it to work correctly with guard_memory()
2413 2436 assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
2414 2437 address base = stack_red_zone_base() - stack_red_zone_size();
2415 2438
2416 2439 guarantee(base < stack_base(),"Error calculating stack red zone");
2417 2440 guarantee(base < os::current_stack_pointer(),"Error calculating stack red zone");
2418 2441
2419 2442 if(!os::guard_memory((char *) base, stack_red_zone_size())) {
2420 2443 warning("Attempt to guard stack red zone failed.");
2421 2444 }
2422 2445 }
2423 2446
2424 2447 void JavaThread::disable_stack_red_zone() {
2425 2448 // The base notation is from the stacks point of view, growing downward.
2426 2449 // We need to adjust it to work correctly with guard_memory()
2427 2450 assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
2428 2451 address base = stack_red_zone_base() - stack_red_zone_size();
2429 2452 if (!os::unguard_memory((char *)base, stack_red_zone_size())) {
2430 2453 warning("Attempt to unguard stack red zone failed.");
2431 2454 }
2432 2455 }
2433 2456
// Apply f to every physical frame on this thread's stack, youngest first.
void JavaThread::frames_do(void f(frame*, const RegisterMap* map)) {
  // ignore if there is no stack
  if (!has_last_Java_frame()) return;
  // traverse the stack frames. Starts from top frame.
  for(StackFrameStream fst(this); !fst.is_done(); fst.next()) {
    frame* fr = fst.current();
    f(fr, fst.register_map());
  }
}
2443 2466
2444 2467
#ifndef PRODUCT
// Deoptimization
// Function for testing deoptimization
void JavaThread::deoptimize() {
  // BiasedLocking needs an updated RegisterMap for the revoke monitors pass
  StackFrameStream fst(this, UseBiasedLocking);
  bool deopt = false; // Dump stack only if a deopt actually happens.
  bool only_at = strlen(DeoptimizeOnlyAt) > 0;
  // Iterate over all frames in the thread and deoptimize
  for(; !fst.is_done(); fst.next()) {
    if(fst.current()->can_be_deoptimized()) {

      if (only_at) {
        // Deoptimize only at particular bcis.  DeoptimizeOnlyAt
        // consists of comma or carriage return separated numbers so
        // search for the current bci in that string.
        address pc = fst.current()->pc();
        nmethod* nm =  (nmethod*) fst.current()->cb();
        ScopeDesc* sd = nm->scope_desc_at( pc);
        char buffer[8];
        jio_snprintf(buffer, sizeof(buffer), "%d", sd->bci());
        size_t len = strlen(buffer);
        const char * found = strstr(DeoptimizeOnlyAt, buffer);
        // A plain strstr hit could be a substring of a longer number
        // (e.g. "2" inside "12"), so verify the match is delimited on
        // both sides by a separator, string start, or string end.
        while (found != NULL) {
          if ((found[len] == ',' || found[len] == '\n' || found[len] == '\0') &&
              (found == DeoptimizeOnlyAt || found[-1] == ',' || found[-1] == '\n')) {
            // Check that the bci found is bracketed by terminators.
            break;
          }
          found = strstr(found + 1, buffer);
        }
        if (!found) {
          continue;
        }
      }

      if (DebugDeoptimization && !deopt) {
        deopt = true; // One-time only print before deopt
        tty->print_cr("[BEFORE Deoptimization]");
        trace_frames();
        trace_stack();
      }
      Deoptimization::deoptimize(this, *fst.current(), fst.register_map());
    }
  }

  if (DebugDeoptimization && deopt) {
    tty->print_cr("[AFTER Deoptimization]");
    trace_frames();
  }
}
2496 2519
2497 2520
2498 2521 // Make zombies
2499 2522 void JavaThread::make_zombies() {
2500 2523 for(StackFrameStream fst(this); !fst.is_done(); fst.next()) {
2501 2524 if (fst.current()->can_be_deoptimized()) {
2502 2525 // it is a Java nmethod
2503 2526 nmethod* nm = CodeCache::find_nmethod(fst.current()->pc());
2504 2527 nm->make_not_entrant();
2505 2528 }
2506 2529 }
2507 2530 }
2508 2531 #endif // PRODUCT
2509 2532
2510 2533
// Deoptimize every frame whose nmethod has been marked for deoptimization.
void JavaThread::deoptimized_wrt_marked_nmethods() {
  if (!has_last_Java_frame()) return;
  // BiasedLocking needs an updated RegisterMap for the revoke monitors pass
  StackFrameStream fst(this, UseBiasedLocking);
  for(; !fst.is_done(); fst.next()) {
    if (fst.current()->should_be_deoptimized()) {
      Deoptimization::deoptimize(this, *fst.current(), fst.register_map());
    }
  }
}
2521 2544
2522 2545
// GC support

// Per-frame adapter so gc_epilogue() can use frames_do().
static void frame_gc_epilogue(frame* f, const RegisterMap* map) { f->gc_epilogue(); }

// Run the per-frame GC epilogue over this thread's whole stack.
void JavaThread::gc_epilogue() {
  frames_do(frame_gc_epilogue);
}
2529 2552
2530 2553
// Per-frame adapter so gc_prologue() can use frames_do().
static void frame_gc_prologue(frame* f, const RegisterMap* map) { f->gc_prologue(); }

// Run the per-frame GC prologue over this thread's whole stack.
void JavaThread::gc_prologue() {
  frames_do(frame_gc_prologue);
}
2536 2559
2537 2560 // If the caller is a NamedThread, then remember, in the current scope,
2538 2561 // the given JavaThread in its _processed_thread field.
2539 2562 class RememberProcessedThread: public StackObj {
2540 2563 NamedThread* _cur_thr;
2541 2564 public:
2542 2565 RememberProcessedThread(JavaThread* jthr) {
2543 2566 Thread* thread = Thread::current();
2544 2567 if (thread->is_Named_thread()) {
2545 2568 _cur_thr = (NamedThread *)thread;
2546 2569 _cur_thr->set_processed_thread(jthr);
2547 2570 } else {
2548 2571 _cur_thr = NULL;
2549 2572 }
2550 2573 }
2551 2574
2552 2575 ~RememberProcessedThread() {
2553 2576 if (_cur_thr) {
2554 2577 _cur_thr->set_processed_thread(NULL);
2555 2578 }
2556 2579 }
2557 2580 };
2558 2581
// GC root scanning for a JavaThread: visit every oop this thread holds —
// handles, privileged stack, monitors, execution-stack frames, deferred
// locals, and the thread's own oop-typed fields.
void JavaThread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
  // Verify that the deferred card marks have been flushed.
  assert(deferred_card_mark().is_empty(), "Should be empty during GC");

  // The ThreadProfiler oops_do is done from FlatProfiler::oops_do
  // since there may be more than one thread using each ThreadProfiler.

  // Traverse the GCHandles
  Thread::oops_do(f, cf);

  assert( (!has_last_Java_frame() && java_call_counter() == 0) ||
          (has_last_Java_frame() && java_call_counter() > 0), "wrong java_sp info!");

  if (has_last_Java_frame()) {
    // Record JavaThread to GC thread
    RememberProcessedThread rpt(this);

    // Traverse the privileged stack
    if (_privileged_stack_top != NULL) {
      _privileged_stack_top->oops_do(f);
    }

    // traverse the registered growable array
    if (_array_for_gc != NULL) {
      for (int index = 0; index < _array_for_gc->length(); index++) {
        f->do_oop(_array_for_gc->adr_at(index));
      }
    }

    // Traverse the monitor chunks
    for (MonitorChunk* chunk = monitor_chunks(); chunk != NULL; chunk = chunk->next()) {
      chunk->oops_do(f);
    }

    // Traverse the execution stack
    for(StackFrameStream fst(this); !fst.is_done(); fst.next()) {
      fst.current()->oops_do(f, cf, fst.register_map());
    }
  }

  // callee_target is never live across a gc point so NULL it here should
  // it still contain a methdOop.

  set_callee_target(NULL);

  assert(vframe_array_head() == NULL, "deopt in progress at a safepoint!");
  // If we have deferred set_locals there might be oops waiting to be
  // written
  GrowableArray<jvmtiDeferredLocalVariableSet*>* list = deferred_locals();
  if (list != NULL) {
    for (int i = 0; i < list->length(); i++) {
      list->at(i)->oops_do(f);
    }
  }

  // Traverse instance variables at the end since the GC may be moving things
  // around using this function
  f->do_oop((oop*) &_threadObj);
  f->do_oop((oop*) &_vm_result);
  f->do_oop((oop*) &_vm_result_2);
  f->do_oop((oop*) &_exception_oop);
  f->do_oop((oop*) &_pending_async_exception);

  if (jvmti_thread_state() != NULL) {
    jvmti_thread_state()->oops_do(f);
  }
}
2626 2649
// Apply cf to every code blob reachable from this thread's stack frames.
void JavaThread::nmethods_do(CodeBlobClosure* cf) {
  Thread::nmethods_do(cf);  // (super method is a no-op)

  assert( (!has_last_Java_frame() && java_call_counter() == 0) ||
          (has_last_Java_frame() && java_call_counter() > 0), "wrong java_sp info!");

  if (has_last_Java_frame()) {
    // Traverse the execution stack
    for(StackFrameStream fst(this); !fst.is_done(); fst.next()) {
      fst.current()->nmethods_do(cf);
    }
  }
}
2640 2663
2641 2664 // Printing
2642 2665 const char* _get_thread_state_name(JavaThreadState _thread_state) {
2643 2666 switch (_thread_state) {
2644 2667 case _thread_uninitialized: return "_thread_uninitialized";
2645 2668 case _thread_new: return "_thread_new";
2646 2669 case _thread_new_trans: return "_thread_new_trans";
2647 2670 case _thread_in_native: return "_thread_in_native";
2648 2671 case _thread_in_native_trans: return "_thread_in_native_trans";
2649 2672 case _thread_in_vm: return "_thread_in_vm";
2650 2673 case _thread_in_vm_trans: return "_thread_in_vm_trans";
2651 2674 case _thread_in_Java: return "_thread_in_Java";
2652 2675 case _thread_in_Java_trans: return "_thread_in_Java_trans";
2653 2676 case _thread_blocked: return "_thread_blocked";
2654 2677 case _thread_blocked_trans: return "_thread_blocked_trans";
2655 2678 default: return "unknown thread state";
2656 2679 }
2657 2680 }
2658 2681
2659 2682 #ifndef PRODUCT
2660 2683 void JavaThread::print_thread_state_on(outputStream *st) const {
2661 2684 st->print_cr(" JavaThread state: %s", _get_thread_state_name(_thread_state));
2662 2685 };
2663 2686 void JavaThread::print_thread_state() const {
2664 2687 print_thread_state_on(tty);
2665 2688 };
2666 2689 #endif // PRODUCT
2667 2690
// Called by Threads::print() for VM_PrintThreads operation
void JavaThread::print_on(outputStream *st) const {
  st->print("\"%s\" ", get_thread_name());
  oop thread_oop = threadObj();
  if (thread_oop != NULL && java_lang_Thread::is_daemon(thread_oop))  st->print("daemon ");
  Thread::print_on(st);
  // print guess for valid stack memory region (assume 4K pages); helps lock debugging
  st->print_cr("[" INTPTR_FORMAT "]", (intptr_t)last_Java_sp() & ~right_n_bits(12));
  // java.lang.Thread.State only exists from JDK 1.5 on.
  if (thread_oop != NULL && JDK_Version::is_gte_jdk15x_version()) {
    st->print_cr("   java.lang.Thread.State: %s", java_lang_Thread::thread_status_name(thread_oop));
  }
#ifndef PRODUCT
  print_thread_state_on(st);
  _safepoint_state->print_on(st);
#endif // PRODUCT
}
2684 2707
2685 2708 // Called by fatal error handler. The difference between this and
2686 2709 // JavaThread::print() is that we can't grab lock or allocate memory.
2687 2710 void JavaThread::print_on_error(outputStream* st, char *buf, int buflen) const {
2688 2711 st->print("JavaThread \"%s\"", get_thread_name_string(buf, buflen));
2689 2712 oop thread_obj = threadObj();
2690 2713 if (thread_obj != NULL) {
2691 2714 if (java_lang_Thread::is_daemon(thread_obj)) st->print(" daemon");
2692 2715 }
2693 2716 st->print(" [");
2694 2717 st->print("%s", _get_thread_state_name(_thread_state));
2695 2718 if (osthread()) {
2696 2719 st->print(", id=%d", osthread()->thread_id());
2697 2720 }
2698 2721 st->print(", stack(" PTR_FORMAT "," PTR_FORMAT ")",
2699 2722 _stack_base - _stack_size, _stack_base);
2700 2723 st->print("]");
2701 2724 return;
2702 2725 }
2703 2726
// Verification

// Per-frame adapter so verify() can use frames_do().
static void frame_verify(frame* f, const RegisterMap *map) { f->verify(map); }

// Check all oops held by this thread, then verify each stack frame.
void JavaThread::verify() {
  // Verify oops in the thread.
  oops_do(&VerifyOopClosure::verify_oop, NULL);

  // Verify the stack frames.
  frames_do(frame_verify);
}
2715 2738
// CR 6300358 (sub-CR 2137150)
// Most callers of this method assume that it can't return NULL but a
// thread may not have a name whilst it is in the process of attaching to
// the VM - see CR 6412693, and there are places where a JavaThread can be
// seen prior to having it's threadObj set (eg JNI attaching threads and
// if vm exit occurs during initialization). These cases can all be accounted
// for such that this method never returns NULL.
const char* JavaThread::get_thread_name() const {
#ifdef ASSERT
  // early safepoints can hit while current thread does not yet have TLS
  if (!SafepointSynchronize::is_at_safepoint()) {
    Thread *cur = Thread::current();
    if (!(cur->is_Java_thread() && cur == this)) {
      // Current JavaThreads are allowed to get their own name without
      // the Threads_lock.
      assert_locked_or_safepoint(Threads_lock);
    }
  }
#endif // ASSERT
    return get_thread_name_string();
}
2737 2760
// Returns a non-NULL representation of this thread's name, or a suitable
// descriptive string if there is no set name.
// When buf is NULL the UTF-8 conversion allocates from the resource area;
// otherwise the caller-supplied buffer of buflen bytes is used (safe in
// contexts that must not allocate, e.g. error reporting).
const char* JavaThread::get_thread_name_string(char* buf, int buflen) const {
  const char* name_str;
  oop thread_obj = threadObj();
  if (thread_obj != NULL) {
    typeArrayOop name = java_lang_Thread::name(thread_obj);
    if (name != NULL) {
      if (buf == NULL) {
        name_str = UNICODE::as_utf8((jchar*) name->base(T_CHAR), name->length());
      }
      else {
        name_str = UNICODE::as_utf8((jchar*) name->base(T_CHAR), name->length(), buf, buflen);
      }
    }
    else if (is_attaching_via_jni()) { // workaround for 6412693 - see 6404306
      name_str = "<no-name - thread is attaching>";
    }
    else {
      name_str = Thread::name();
    }
  }
  else {
    // threadObj not yet set (early startup or JNI attach in progress).
    name_str = Thread::name();
  }
  assert(name_str != NULL, "unexpected NULL thread name");
  return name_str;
}
2766 2789
2767 2790
2768 2791 const char* JavaThread::get_threadgroup_name() const {
2769 2792 debug_only(if (JavaThread::current() != this) assert_locked_or_safepoint(Threads_lock);)
2770 2793 oop thread_obj = threadObj();
2771 2794 if (thread_obj != NULL) {
2772 2795 oop thread_group = java_lang_Thread::threadGroup(thread_obj);
2773 2796 if (thread_group != NULL) {
2774 2797 typeArrayOop name = java_lang_ThreadGroup::name(thread_group);
2775 2798 // ThreadGroup.name can be null
2776 2799 if (name != NULL) {
2777 2800 const char* str = UNICODE::as_utf8((jchar*) name->base(T_CHAR), name->length());
2778 2801 return str;
2779 2802 }
2780 2803 }
2781 2804 }
2782 2805 return NULL;
2783 2806 }
2784 2807
2785 2808 const char* JavaThread::get_parent_name() const {
2786 2809 debug_only(if (JavaThread::current() != this) assert_locked_or_safepoint(Threads_lock);)
2787 2810 oop thread_obj = threadObj();
2788 2811 if (thread_obj != NULL) {
2789 2812 oop thread_group = java_lang_Thread::threadGroup(thread_obj);
2790 2813 if (thread_group != NULL) {
2791 2814 oop parent = java_lang_ThreadGroup::parent(thread_group);
2792 2815 if (parent != NULL) {
2793 2816 typeArrayOop name = java_lang_ThreadGroup::name(parent);
2794 2817 // ThreadGroup.name can be null
2795 2818 if (name != NULL) {
2796 2819 const char* str = UNICODE::as_utf8((jchar*) name->base(T_CHAR), name->length());
2797 2820 return str;
2798 2821 }
2799 2822 }
2800 2823 }
2801 2824 }
2802 2825 return NULL;
2803 2826 }
2804 2827
2805 2828 ThreadPriority JavaThread::java_priority() const {
2806 2829 oop thr_oop = threadObj();
2807 2830 if (thr_oop == NULL) return NormPriority; // Bootstrapping
2808 2831 ThreadPriority priority = java_lang_Thread::priority(thr_oop);
2809 2832 assert(MinPriority <= priority && priority <= MaxPriority, "sanity check");
2810 2833 return priority;
2811 2834 }
2812 2835
// Final setup of a newly created JavaThread: link it with its
// java.lang.Thread oop, propagate the priority, and add it to the
// Threads list.  Must hold the Threads_lock throughout.
void JavaThread::prepare(jobject jni_thread, ThreadPriority prio) {

  assert(Threads_lock->owner() == Thread::current(), "must have threads lock");
  // Link Java Thread object <-> C++ Thread

  // Get the C++ thread object (an oop) from the JNI handle (a jthread)
  // and put it into a new Handle.  The Handle "thread_oop" can then
  // be used to pass the C++ thread object to other methods.

  // Set the Java level thread object (jthread) field of the
  // new thread (a JavaThread *) to C++ thread object using the
  // "thread_oop" handle.

  // Set the thread field (a JavaThread *) of the
  // oop representing the java_lang_Thread to the new thread (a JavaThread *).

  Handle thread_oop(Thread::current(),
                    JNIHandles::resolve_non_null(jni_thread));
  assert(instanceKlass::cast(thread_oop->klass())->is_linked(),
    "must be initialized");
  set_threadObj(thread_oop());
  java_lang_Thread::set_thread(thread_oop(), this);

  // NoPriority means "inherit whatever the Java object already carries".
  if (prio == NoPriority) {
    prio = java_lang_Thread::priority(thread_oop());
    assert(prio != NoPriority, "A valid priority should be present");
  }

  // Push the Java priority down to the native thread; needs Threads_lock
  Thread::set_priority(this, prio);

  // Add the new thread to the Threads list and set it in motion.
  // We must have threads lock in order to call Threads::add.
  // It is crucial that we do not block before the thread is
  // added to the Threads list for if a GC happens, then the java_thread oop
  // will not be visited by GC.
  Threads::add(this);
}
2851 2874
2852 2875 oop JavaThread::current_park_blocker() {
2853 2876 // Support for JSR-166 locks
2854 2877 oop thread_oop = threadObj();
2855 2878 if (thread_oop != NULL &&
2856 2879 JDK_Version::current().supports_thread_park_blocker()) {
2857 2880 return java_lang_Thread::park_blocker(thread_oop);
2858 2881 }
2859 2882 return NULL;
2860 2883 }
2861 2884
2862 2885
2863 2886 void JavaThread::print_stack_on(outputStream* st) {
2864 2887 if (!has_last_Java_frame()) return;
2865 2888 ResourceMark rm;
2866 2889 HandleMark hm;
2867 2890
2868 2891 RegisterMap reg_map(this);
2869 2892 vframe* start_vf = last_java_vframe(®_map);
2870 2893 int count = 0;
2871 2894 for (vframe* f = start_vf; f; f = f->sender() ) {
2872 2895 if (f->is_java_frame()) {
2873 2896 javaVFrame* jvf = javaVFrame::cast(f);
2874 2897 java_lang_Throwable::print_stack_element(st, jvf->method(), jvf->bci());
2875 2898
2876 2899 // Print out lock information
2877 2900 if (JavaMonitorsInStackTrace) {
2878 2901 jvf->print_lock_info_on(st, count);
2879 2902 }
2880 2903 } else {
2881 2904 // Ignore non-Java frames
2882 2905 }
2883 2906
2884 2907 // Bail-out case for too deep stacks
2885 2908 count++;
2886 2909 if (MaxJavaStackTraceDepth == count) return;
2887 2910 }
2888 2911 }
2889 2912
2890 2913
// JVMTI PopFrame support
// Copy size_in_bytes of argument data starting at 'start' into a C-heap
// buffer so it survives the frame pop; freed by popframe_free_preserved_args.
void JavaThread::popframe_preserve_args(ByteSize size_in_bytes, void* start) {
  assert(_popframe_preserved_args == NULL, "should not wipe out old PopFrame preserved arguments");
  if (in_bytes(size_in_bytes) != 0) {
    _popframe_preserved_args = NEW_C_HEAP_ARRAY(char, in_bytes(size_in_bytes));
    _popframe_preserved_args_size = in_bytes(size_in_bytes);
    Copy::conjoint_jbytes(start, _popframe_preserved_args, _popframe_preserved_args_size);
  }
}
2900 2923
// Raw pointer to the preserved PopFrame argument buffer (NULL if none).
void* JavaThread::popframe_preserved_args() {
  return _popframe_preserved_args;
}
2904 2927
// Size of the preserved PopFrame argument buffer, in bytes.
ByteSize JavaThread::popframe_preserved_args_size() {
  return in_ByteSize(_popframe_preserved_args_size);
}
2908 2931
2909 2932 WordSize JavaThread::popframe_preserved_args_size_in_words() {
2910 2933 int sz = in_bytes(popframe_preserved_args_size());
2911 2934 assert(sz % wordSize == 0, "argument size must be multiple of wordSize");
2912 2935 return in_WordSize(sz / wordSize);
2913 2936 }
2914 2937
// Release the C-heap buffer allocated by popframe_preserve_args.
void JavaThread::popframe_free_preserved_args() {
  assert(_popframe_preserved_args != NULL, "should not free PopFrame preserved arguments twice");
  FREE_C_HEAP_ARRAY(char, (char*) _popframe_preserved_args);
  _popframe_preserved_args = NULL;
  _popframe_preserved_args_size = 0;
}
2921 2944
#ifndef PRODUCT

// Debug aid: print a numbered one-line description of every physical
// frame on this thread's stack, youngest first.
void JavaThread::trace_frames() {
  tty->print_cr("[Describe stack]");
  int frame_no = 1;
  for(StackFrameStream fst(this); !fst.is_done(); fst.next()) {
    tty->print("  %d. ", frame_no++);
    fst.current()->print_value_on(tty,this);
    tty->cr();
  }
}
2933 2956
// Debug closure: for each visited location, print its address and whether
// the oop stored there looks valid (used by trace_oops below).
class PrintAndVerifyOopClosure: public OopClosure {
 protected:
  template <class T> inline void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    if (obj == NULL) return;
    tty->print(INTPTR_FORMAT ": ", p);
    if (obj->is_oop_or_null()) {
      if (obj->is_objArray()) {
        // Avoid printing every element of large object arrays.
        tty->print_cr("valid objArray: " INTPTR_FORMAT, (oopDesc*) obj);
      } else {
        obj->print();
      }
    } else {
      tty->print_cr("invalid oop: " INTPTR_FORMAT, (oopDesc*) obj);
    }
    tty->cr();
  }
 public:
  virtual void do_oop(oop* p) { do_oop_work(p); }
  virtual void do_oop(narrowOop* p)  { do_oop_work(p); }
};
2955 2978
2956 2979
// Per-frame adapter for trace_oops(): print the frame then print/verify
// every oop location it holds.
static void oops_print(frame* f, const RegisterMap *map) {
  PrintAndVerifyOopClosure print;
  f->print_value();
  f->oops_do(&print, NULL, (RegisterMap*)map);
}
2962 2985
// Print out all the locations that contain oops and whether they are
// valid or not.  This is useful when trying to find the oldest frame
// where an oop has gone bad since the frame walk is from youngest to
// oldest.
void JavaThread::trace_oops() {
  tty->print_cr("[Trace oops]");
  frames_do(oops_print);
}
2971 2994
2972 2995
#ifdef ASSERT
// Print or validate the layout of stack frames
// depth == 0 means walk the whole stack; validate_only skips printing
// and only checks the collected FrameValues for consistency.
void JavaThread::print_frame_layout(int depth, bool validate_only) {
  ResourceMark rm;
  PRESERVE_EXCEPTION_MARK;
  FrameValues values;
  int frame_no = 0;
  for(StackFrameStream fst(this, false); !fst.is_done(); fst.next()) {
    fst.current()->describe(values, ++frame_no);
    if (depth == frame_no) break;
  }
  if (validate_only) {
    values.validate();
  } else {
    tty->print_cr("[Describe stack layout]");
    values.print(this);
  }
}
#endif
2992 3015
// Debug aid: print the vframe chain starting at start_vf, up to
// StackPrintLimit entries (Java frames get activations, others f->print()).
void JavaThread::trace_stack_from(vframe* start_vf) {
  ResourceMark rm;
  int vframe_no = 1;
  for (vframe* f = start_vf; f; f = f->sender() ) {
    if (f->is_java_frame()) {
      javaVFrame::cast(f)->print_activation(vframe_no++);
    } else {
      f->print();
    }
    if (vframe_no > StackPrintLimit) {
      tty->print_cr("...<more frames>...");
      return;
    }
  }
}
3008 3031
3009 3032
3010 3033 void JavaThread::trace_stack() {
3011 3034 if (!has_last_Java_frame()) return;
3012 3035 ResourceMark rm;
3013 3036 HandleMark hm;
3014 3037 RegisterMap reg_map(this);
3015 3038 trace_stack_from(last_java_vframe(®_map));
3016 3039 }
3017 3040
3018 3041
3019 3042 #endif // PRODUCT
3020 3043
3021 3044
// Return the youngest Java-level vframe on this thread's stack, or NULL
// if no Java frame is found.  Caller supplies the RegisterMap used for
// the walk.
javaVFrame* JavaThread::last_java_vframe(RegisterMap *reg_map) {
  assert(reg_map != NULL, "a map must be given");
  frame f = last_frame();
  for (vframe* vf = vframe::new_vframe(&f, reg_map, this); vf; vf = vf->sender() ) {
    if (vf->is_java_frame()) return javaVFrame::cast(vf);
  }
  return NULL;
}
3030 3053
3031 3054
// Return the class of the caller 'depth' frames up the stack, skipping
// security-related frames (see vframeStream::security_get_caller_frame),
// or NULL if the walk runs off the end of the stack.
klassOop JavaThread::security_get_caller_class(int depth) {
  vframeStream vfst(this);
  vfst.security_get_caller_frame(depth);
  if (!vfst.at_end()) {
    return vfst.method()->method_holder();
  }
  return NULL;
}
3040 3063
// Thread entry point for CompilerThreads: hand control to the compile
// broker's work loop.
static void compiler_thread_entry(JavaThread* thread, TRAPS) {
  assert(thread->is_Compiler_thread(), "must be compiler thread");
  CompileBroker::compiler_thread_loop();
}
3045 3068
// Create a CompilerThread
// queue: the compile-task queue this thread will service;
// counters: performance counters owned by the broker.
CompilerThread::CompilerThread(CompileQueue* queue, CompilerCounters* counters)
: JavaThread(&compiler_thread_entry) {
  _env   = NULL;
  _log   = NULL;
  _task  = NULL;
  _queue = queue;
  _counters = counters;
  _buffer_blob = NULL;
  _scanned_nmethod = NULL;

#ifndef PRODUCT
  _ideal_graph_printer = NULL;
#endif
}
3061 3084
// GC root scanning for a CompilerThread: in addition to the JavaThread
// roots, keep alive the nmethod the sweeper is currently scanning.
void CompilerThread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
  JavaThread::oops_do(f, cf);
  if (_scanned_nmethod != NULL && cf != NULL) {
    // Safepoints can occur when the sweeper is scanning an nmethod so
    // process it here to make sure it isn't unloaded in the middle of
    // a scan.
    cf->do_code_blob(_scanned_nmethod);
  }
}
3071 3094
// ======= Threads ========

// The Threads class links together all active threads, and provides
// operations over all threads.  It is protected by its own Mutex
// lock, which is also used in other contexts to protect thread
// operations from having the thread being operated on from exiting
// and going away unexpectedly (e.g., safepoint synchronization)

// Static state backing the global thread list.
JavaThread* Threads::_thread_list = NULL;
int Threads::_number_of_threads = 0;
int Threads::_number_of_non_daemon_threads = 0;
int Threads::_return_code = 0;
size_t JavaThread::_stack_size_at_create = 0;

// All JavaThreads
#define ALL_JAVA_THREADS(X) for (JavaThread* X = _thread_list; X; X = X->next())

// Forward declaration (defined elsewhere; purpose not visible here).
void os_stream();
3091 3114 // All JavaThreads + all non-JavaThreads (i.e., every thread in the system)
3092 3115 void Threads::threads_do(ThreadClosure* tc) {
3093 3116 assert_locked_or_safepoint(Threads_lock);
3094 3117 // ALL_JAVA_THREADS iterates through all JavaThreads
3095 3118 ALL_JAVA_THREADS(p) {
3096 3119 tc->do_thread(p);
3097 3120 }
3098 3121 // Someday we could have a table or list of all non-JavaThreads.
3099 3122 // For now, just manually iterate through them.
3100 3123 tc->do_thread(VMThread::vm_thread());
3101 3124 Universe::heap()->gc_threads_do(tc);
3102 3125 WatcherThread *wt = WatcherThread::watcher_thread();
3103 3126 // Strictly speaking, the following NULL check isn't sufficient to make sure
3104 3127 // the data for WatcherThread is still valid upon being examined. However,
3105 3128 // considering that WatchThread terminates when the VM is on the way to
3106 3129 // exit at safepoint, the chance of the above is extremely small. The right
3107 3130 // way to prevent termination of WatcherThread would be to acquire
3108 3131 // Terminator_lock, but we can't do that without violating the lock rank
3109 3132 // checking in some cases.
3110 3133 if (wt != NULL)
3111 3134 tc->do_thread(wt);
3112 3135
3113 3136 // If CompilerThreads ever become non-JavaThreads, add them here
3114 3137 }
3115 3138
3116 3139 jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
3117 3140
3118 3141 extern void JDK_Version_init();
3119 3142
3120 3143 // Check version
3121 3144 if (!is_supported_jni_version(args->version)) return JNI_EVERSION;
3122 3145
3123 3146 // Initialize the output stream module
3124 3147 ostream_init();
3125 3148
3126 3149 // Process java launcher properties.
3127 3150 Arguments::process_sun_java_launcher_properties(args);
3128 3151
3129 3152 // Initialize the os module before using TLS
3130 3153 os::init();
3131 3154
3132 3155 // Initialize system properties.
3133 3156 Arguments::init_system_properties();
3134 3157
3135 3158 // So that JDK version can be used as a discrimintor when parsing arguments
3136 3159 JDK_Version_init();
3137 3160
3138 3161 // Update/Initialize System properties after JDK version number is known
3139 3162 Arguments::init_version_specific_system_properties();
3140 3163
3141 3164 // Parse arguments
3142 3165 jint parse_result = Arguments::parse(args);
3143 3166 if (parse_result != JNI_OK) return parse_result;
3144 3167
3145 3168 if (PauseAtStartup) {
3146 3169 os::pause();
3147 3170 }
3148 3171
3149 3172 #ifndef USDT2
3150 3173 HS_DTRACE_PROBE(hotspot, vm__init__begin);
3151 3174 #else /* USDT2 */
3152 3175 HOTSPOT_VM_INIT_BEGIN();
3153 3176 #endif /* USDT2 */
3154 3177
3155 3178 // Record VM creation timing statistics
3156 3179 TraceVmCreationTime create_vm_timer;
3157 3180 create_vm_timer.start();
3158 3181
3159 3182 // Timing (must come after argument parsing)
3160 3183 TraceTime timer("Create VM", TraceStartupTime);
3161 3184
3162 3185 // Initialize the os module after parsing the args
3163 3186 jint os_init_2_result = os::init_2();
3164 3187 if (os_init_2_result != JNI_OK) return os_init_2_result;
3165 3188
3166 3189 // Initialize output stream logging
3167 3190 ostream_init_log();
3168 3191
3169 3192 // Convert -Xrun to -agentlib: if there is no JVM_OnLoad
3170 3193 // Must be before create_vm_init_agents()
3171 3194 if (Arguments::init_libraries_at_startup()) {
3172 3195 convert_vm_init_libraries_to_agents();
3173 3196 }
3174 3197
3175 3198 // Launch -agentlib/-agentpath and converted -Xrun agents
3176 3199 if (Arguments::init_agents_at_startup()) {
3177 3200 create_vm_init_agents();
3178 3201 }
3179 3202
3180 3203 // Initialize Threads state
3181 3204 _thread_list = NULL;
3182 3205 _number_of_threads = 0;
3183 3206 _number_of_non_daemon_threads = 0;
3184 3207
3185 3208 // Initialize TLS
3186 3209 ThreadLocalStorage::init();
3187 3210
3188 3211 // Initialize global data structures and create system classes in heap
3189 3212 vm_init_globals();
3190 3213
3191 3214 // Attach the main thread to this os thread
3192 3215 JavaThread* main_thread = new JavaThread();
3193 3216 main_thread->set_thread_state(_thread_in_vm);
3194 3217 // must do this before set_active_handles and initialize_thread_local_storage
3195 3218 // Note: on solaris initialize_thread_local_storage() will (indirectly)
3196 3219 // change the stack size recorded here to one based on the java thread
3197 3220 // stacksize. This adjusted size is what is used to figure the placement
3198 3221 // of the guard pages.
3199 3222 main_thread->record_stack_base_and_size();
3200 3223 main_thread->initialize_thread_local_storage();
3201 3224
3202 3225 main_thread->set_active_handles(JNIHandleBlock::allocate_block());
3203 3226
3204 3227 if (!main_thread->set_as_starting_thread()) {
3205 3228 vm_shutdown_during_initialization(
3206 3229 "Failed necessary internal allocation. Out of swap space");
3207 3230 delete main_thread;
3208 3231 *canTryAgain = false; // don't let caller call JNI_CreateJavaVM again
3209 3232 return JNI_ENOMEM;
3210 3233 }
3211 3234
3212 3235 // Enable guard page *after* os::create_main_thread(), otherwise it would
3213 3236 // crash Linux VM, see notes in os_linux.cpp.
3214 3237 main_thread->create_stack_guard_pages();
3215 3238
3216 3239 // Initialize Java-Level synchronization subsystem
3217 3240 ObjectMonitor::Initialize() ;
3218 3241
3219 3242 // Initialize global modules
3220 3243 jint status = init_globals();
3221 3244 if (status != JNI_OK) {
3222 3245 delete main_thread;
3223 3246 *canTryAgain = false; // don't let caller call JNI_CreateJavaVM again
3224 3247 return status;
3225 3248 }
3226 3249
3227 3250 // Should be done after the heap is fully created
3228 3251 main_thread->cache_global_variables();
3229 3252
3230 3253 HandleMark hm;
3231 3254
3232 3255 { MutexLocker mu(Threads_lock);
3233 3256 Threads::add(main_thread);
3234 3257 }
3235 3258
3236 3259 // Any JVMTI raw monitors entered in onload will transition into
3237 3260 // real raw monitor. VM is setup enough here for raw monitor enter.
3238 3261 JvmtiExport::transition_pending_onload_raw_monitors();
3239 3262
3240 3263 if (VerifyBeforeGC &&
3241 3264 Universe::heap()->total_collections() >= VerifyGCStartAt) {
3242 3265 Universe::heap()->prepare_for_verify();
3243 3266 Universe::verify(); // make sure we're starting with a clean slate
3244 3267 }
3245 3268
3246 3269 // Create the VMThread
3247 3270 { TraceTime timer("Start VMThread", TraceStartupTime);
3248 3271 VMThread::create();
3249 3272 Thread* vmthread = VMThread::vm_thread();
3250 3273
3251 3274 if (!os::create_thread(vmthread, os::vm_thread))
3252 3275 vm_exit_during_initialization("Cannot create VM thread. Out of system resources.");
3253 3276
3254 3277 // Wait for the VM thread to become ready, and VMThread::run to initialize
3255 3278 // Monitors can have spurious returns, must always check another state flag
3256 3279 {
3257 3280 MutexLocker ml(Notify_lock);
3258 3281 os::start_thread(vmthread);
3259 3282 while (vmthread->active_handles() == NULL) {
3260 3283 Notify_lock->wait();
3261 3284 }
3262 3285 }
3263 3286 }
3264 3287
3265 3288 assert (Universe::is_fully_initialized(), "not initialized");
3266 3289 EXCEPTION_MARK;
3267 3290
3268 3291 // At this point, the Universe is initialized, but we have not executed
3269 3292 // any byte code. Now is a good time (the only time) to dump out the
3270 3293 // internal state of the JVM for sharing.
3271 3294
3272 3295 if (DumpSharedSpaces) {
3273 3296 Universe::heap()->preload_and_dump(CHECK_0);
3274 3297 ShouldNotReachHere();
3275 3298 }
3276 3299
3277 3300 // Always call even when there are not JVMTI environments yet, since environments
3278 3301 // may be attached late and JVMTI must track phases of VM execution
3279 3302 JvmtiExport::enter_start_phase();
3280 3303
3281 3304 // Notify JVMTI agents that VM has started (JNI is up) - nop if no agents.
3282 3305 JvmtiExport::post_vm_start();
3283 3306
3284 3307 {
3285 3308 TraceTime timer("Initialize java.lang classes", TraceStartupTime);
3286 3309
3287 3310 if (EagerXrunInit && Arguments::init_libraries_at_startup()) {
3288 3311 create_vm_init_libraries();
3289 3312 }
3290 3313
3291 3314 if (InitializeJavaLangString) {
3292 3315 initialize_class(vmSymbols::java_lang_String(), CHECK_0);
3293 3316 } else {
3294 3317 warning("java.lang.String not initialized");
3295 3318 }
3296 3319
3297 3320 if (AggressiveOpts) {
3298 3321 {
3299 3322 // Forcibly initialize java/util/HashMap and mutate the private
3300 3323 // static final "frontCacheEnabled" field before we start creating instances
3301 3324 #ifdef ASSERT
3302 3325 klassOop tmp_k = SystemDictionary::find(vmSymbols::java_util_HashMap(), Handle(), Handle(), CHECK_0);
3303 3326 assert(tmp_k == NULL, "java/util/HashMap should not be loaded yet");
3304 3327 #endif
3305 3328 klassOop k_o = SystemDictionary::resolve_or_null(vmSymbols::java_util_HashMap(), Handle(), Handle(), CHECK_0);
3306 3329 KlassHandle k = KlassHandle(THREAD, k_o);
3307 3330 guarantee(k.not_null(), "Must find java/util/HashMap");
3308 3331 instanceKlassHandle ik = instanceKlassHandle(THREAD, k());
3309 3332 ik->initialize(CHECK_0);
3310 3333 fieldDescriptor fd;
3311 3334 // Possible we might not find this field; if so, don't break
3312 3335 if (ik->find_local_field(vmSymbols::frontCacheEnabled_name(), vmSymbols::bool_signature(), &fd)) {
3313 3336 k()->java_mirror()->bool_field_put(fd.offset(), true);
3314 3337 }
3315 3338 }
3316 3339
3317 3340 if (UseStringCache) {
3318 3341 // Forcibly initialize java/lang/StringValue and mutate the private
3319 3342 // static final "stringCacheEnabled" field before we start creating instances
3320 3343 klassOop k_o = SystemDictionary::resolve_or_null(vmSymbols::java_lang_StringValue(), Handle(), Handle(), CHECK_0);
3321 3344 // Possible that StringValue isn't present: if so, silently don't break
3322 3345 if (k_o != NULL) {
3323 3346 KlassHandle k = KlassHandle(THREAD, k_o);
3324 3347 instanceKlassHandle ik = instanceKlassHandle(THREAD, k());
3325 3348 ik->initialize(CHECK_0);
3326 3349 fieldDescriptor fd;
3327 3350 // Possible we might not find this field: if so, silently don't break
3328 3351 if (ik->find_local_field(vmSymbols::stringCacheEnabled_name(), vmSymbols::bool_signature(), &fd)) {
3329 3352 k()->java_mirror()->bool_field_put(fd.offset(), true);
3330 3353 }
3331 3354 }
3332 3355 }
3333 3356 }
3334 3357
3335 3358 // Initialize java_lang.System (needed before creating the thread)
3336 3359 if (InitializeJavaLangSystem) {
3337 3360 initialize_class(vmSymbols::java_lang_System(), CHECK_0);
3338 3361 initialize_class(vmSymbols::java_lang_ThreadGroup(), CHECK_0);
3339 3362 Handle thread_group = create_initial_thread_group(CHECK_0);
3340 3363 Universe::set_main_thread_group(thread_group());
3341 3364 initialize_class(vmSymbols::java_lang_Thread(), CHECK_0);
3342 3365 oop thread_object = create_initial_thread(thread_group, main_thread, CHECK_0);
3343 3366 main_thread->set_threadObj(thread_object);
3344 3367 // Set thread status to running since main thread has
↓ open down ↓ |
2336 lines elided |
↑ open up ↑ |
3345 3368 // been started and running.
3346 3369 java_lang_Thread::set_thread_status(thread_object,
3347 3370 java_lang_Thread::RUNNABLE);
3348 3371
3349 3372 // The VM preresolve methods to these classes. Make sure that get initialized
3350 3373 initialize_class(vmSymbols::java_lang_reflect_Method(), CHECK_0);
3351 3374 initialize_class(vmSymbols::java_lang_ref_Finalizer(), CHECK_0);
3352 3375 // The VM creates & returns objects of this class. Make sure it's initialized.
3353 3376 initialize_class(vmSymbols::java_lang_Class(), CHECK_0);
3354 3377 call_initializeSystemClass(CHECK_0);
3378 +
3379 + // get the Java runtime name after java.lang.System is initialized
3380 + JDK_Version::set_runtime_name(get_java_runtime_name(THREAD));
3355 3381 } else {
3356 3382 warning("java.lang.System not initialized");
3357 3383 }
3358 3384
3359 3385 // an instance of OutOfMemory exception has been allocated earlier
3360 3386 if (InitializeJavaLangExceptionsErrors) {
3361 3387 initialize_class(vmSymbols::java_lang_OutOfMemoryError(), CHECK_0);
3362 3388 initialize_class(vmSymbols::java_lang_NullPointerException(), CHECK_0);
3363 3389 initialize_class(vmSymbols::java_lang_ClassCastException(), CHECK_0);
3364 3390 initialize_class(vmSymbols::java_lang_ArrayStoreException(), CHECK_0);
3365 3391 initialize_class(vmSymbols::java_lang_ArithmeticException(), CHECK_0);
3366 3392 initialize_class(vmSymbols::java_lang_StackOverflowError(), CHECK_0);
3367 3393 initialize_class(vmSymbols::java_lang_IllegalMonitorStateException(), CHECK_0);
3368 3394 initialize_class(vmSymbols::java_lang_IllegalArgumentException(), CHECK_0);
3369 3395 } else {
3370 3396 warning("java.lang.OutOfMemoryError has not been initialized");
3371 3397 warning("java.lang.NullPointerException has not been initialized");
3372 3398 warning("java.lang.ClassCastException has not been initialized");
3373 3399 warning("java.lang.ArrayStoreException has not been initialized");
3374 3400 warning("java.lang.ArithmeticException has not been initialized");
3375 3401 warning("java.lang.StackOverflowError has not been initialized");
3376 3402 warning("java.lang.IllegalArgumentException has not been initialized");
3377 3403 }
3378 3404 }
3379 3405
3380 3406 // See : bugid 4211085.
3381 3407 // Background : the static initializer of java.lang.Compiler tries to read
3382 3408 // property"java.compiler" and read & write property "java.vm.info".
3383 3409 // When a security manager is installed through the command line
3384 3410 // option "-Djava.security.manager", the above properties are not
3385 3411 // readable and the static initializer for java.lang.Compiler fails
3386 3412 // resulting in a NoClassDefFoundError. This can happen in any
3387 3413 // user code which calls methods in java.lang.Compiler.
3388 3414 // Hack : the hack is to pre-load and initialize this class, so that only
3389 3415 // system domains are on the stack when the properties are read.
3390 3416 // Currently even the AWT code has calls to methods in java.lang.Compiler.
3391 3417 // On the classic VM, java.lang.Compiler is loaded very early to load the JIT.
3392 3418 // Future Fix : the best fix is to grant everyone permissions to read "java.compiler" and
3393 3419 // read and write"java.vm.info" in the default policy file. See bugid 4211383
3394 3420 // Once that is done, we should remove this hack.
3395 3421 initialize_class(vmSymbols::java_lang_Compiler(), CHECK_0);
3396 3422
3397 3423 // More hackery - the static initializer of java.lang.Compiler adds the string "nojit" to
3398 3424 // the java.vm.info property if no jit gets loaded through java.lang.Compiler (the hotspot
3399 3425 // compiler does not get loaded through java.lang.Compiler). "java -version" with the
3400 3426 // hotspot vm says "nojit" all the time which is confusing. So, we reset it here.
3401 3427 // This should also be taken out as soon as 4211383 gets fixed.
3402 3428 reset_vm_info_property(CHECK_0);
3403 3429
3404 3430 quicken_jni_functions();
3405 3431
3406 3432 // Must be run after init_ft which initializes ft_enabled
3407 3433 if (TRACE_INITIALIZE() != JNI_OK) {
3408 3434 vm_exit_during_initialization("Failed to initialize tracing backend");
3409 3435 }
3410 3436
3411 3437 // Set flag that basic initialization has completed. Used by exceptions and various
3412 3438 // debug stuff, that does not work until all basic classes have been initialized.
3413 3439 set_init_completed();
3414 3440
3415 3441 #ifndef USDT2
3416 3442 HS_DTRACE_PROBE(hotspot, vm__init__end);
3417 3443 #else /* USDT2 */
3418 3444 HOTSPOT_VM_INIT_END();
3419 3445 #endif /* USDT2 */
3420 3446
3421 3447 // record VM initialization completion time
3422 3448 Management::record_vm_init_completed();
3423 3449
3424 3450 // Compute system loader. Note that this has to occur after set_init_completed, since
3425 3451 // valid exceptions may be thrown in the process.
3426 3452 // Note that we do not use CHECK_0 here since we are inside an EXCEPTION_MARK and
3427 3453 // set_init_completed has just been called, causing exceptions not to be shortcut
3428 3454 // anymore. We call vm_exit_during_initialization directly instead.
3429 3455 SystemDictionary::compute_java_system_loader(THREAD);
3430 3456 if (HAS_PENDING_EXCEPTION) {
3431 3457 vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION));
3432 3458 }
3433 3459
3434 3460 #ifndef SERIALGC
3435 3461 // Support for ConcurrentMarkSweep. This should be cleaned up
3436 3462 // and better encapsulated. The ugly nested if test would go away
3437 3463 // once things are properly refactored. XXX YSR
3438 3464 if (UseConcMarkSweepGC || UseG1GC) {
3439 3465 if (UseConcMarkSweepGC) {
3440 3466 ConcurrentMarkSweepThread::makeSurrogateLockerThread(THREAD);
3441 3467 } else {
3442 3468 ConcurrentMarkThread::makeSurrogateLockerThread(THREAD);
3443 3469 }
3444 3470 if (HAS_PENDING_EXCEPTION) {
3445 3471 vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION));
3446 3472 }
3447 3473 }
3448 3474 #endif // SERIALGC
3449 3475
3450 3476 // Always call even when there are not JVMTI environments yet, since environments
3451 3477 // may be attached late and JVMTI must track phases of VM execution
3452 3478 JvmtiExport::enter_live_phase();
3453 3479
3454 3480 // Signal Dispatcher needs to be started before VMInit event is posted
3455 3481 os::signal_init();
3456 3482
3457 3483 // Start Attach Listener if +StartAttachListener or it can't be started lazily
3458 3484 if (!DisableAttachMechanism) {
3459 3485 if (StartAttachListener || AttachListener::init_at_startup()) {
3460 3486 AttachListener::init();
3461 3487 }
3462 3488 }
3463 3489
3464 3490 // Launch -Xrun agents
3465 3491 // Must be done in the JVMTI live phase so that for backward compatibility the JDWP
3466 3492 // back-end can launch with -Xdebug -Xrunjdwp.
3467 3493 if (!EagerXrunInit && Arguments::init_libraries_at_startup()) {
3468 3494 create_vm_init_libraries();
3469 3495 }
3470 3496
3471 3497 // Notify JVMTI agents that VM initialization is complete - nop if no agents.
3472 3498 JvmtiExport::post_vm_initialized();
3473 3499
3474 3500 if (!TRACE_START()) {
3475 3501 vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION));
3476 3502 }
3477 3503
3478 3504 if (CleanChunkPoolAsync) {
3479 3505 Chunk::start_chunk_pool_cleaner_task();
3480 3506 }
3481 3507
3482 3508 // initialize compiler(s)
3483 3509 CompileBroker::compilation_init();
3484 3510
3485 3511 Management::initialize(THREAD);
3486 3512 if (HAS_PENDING_EXCEPTION) {
3487 3513 // management agent fails to start possibly due to
3488 3514 // configuration problem and is responsible for printing
3489 3515 // stack trace if appropriate. Simply exit VM.
3490 3516 vm_exit(1);
3491 3517 }
3492 3518
3493 3519 if (Arguments::has_profile()) FlatProfiler::engage(main_thread, true);
3494 3520 if (Arguments::has_alloc_profile()) AllocationProfiler::engage();
3495 3521 if (MemProfiling) MemProfiler::engage();
3496 3522 StatSampler::engage();
3497 3523 if (CheckJNICalls) JniPeriodicChecker::engage();
3498 3524
3499 3525 BiasedLocking::init();
3500 3526
3501 3527 if (JDK_Version::current().post_vm_init_hook_enabled()) {
3502 3528 call_postVMInitHook(THREAD);
3503 3529 // The Java side of PostVMInitHook.run must deal with all
3504 3530 // exceptions and provide means of diagnosis.
3505 3531 if (HAS_PENDING_EXCEPTION) {
3506 3532 CLEAR_PENDING_EXCEPTION;
3507 3533 }
3508 3534 }
3509 3535
3510 3536 // Start up the WatcherThread if there are any periodic tasks
3511 3537 // NOTE: All PeriodicTasks should be registered by now. If they
3512 3538 // aren't, late joiners might appear to start slowly (we might
3513 3539 // take a while to process their first tick).
3514 3540 if (PeriodicTask::num_tasks() > 0) {
3515 3541 WatcherThread::start();
3516 3542 }
3517 3543
3518 3544 // Give os specific code one last chance to start
3519 3545 os::init_3();
3520 3546
3521 3547 create_vm_timer.end();
3522 3548 return JNI_OK;
3523 3549 }
3524 3550
// type for the Agent_OnLoad and JVM_OnLoad entry points
extern "C" {
  typedef jint (JNICALL *OnLoadEntry_t)(JavaVM *, char *, void *);
}
// Find a command line agent library and return its entry point for
//     -agentlib:  -agentpath:   -Xrun
// num_symbol_entries must be passed-in since only the caller knows the number of symbols in the array.
// Loads the library on first use (caching the handle on the AgentLibrary),
// then scans the candidate symbol names in order and returns the first
// entry point found, or NULL if none of the symbols resolve.
// Exits the VM if the library itself cannot be loaded.
static OnLoadEntry_t lookup_on_load(AgentLibrary* agent, const char *on_load_symbols[], size_t num_symbol_entries) {
  OnLoadEntry_t on_load_entry = NULL;
  void *library = agent->os_lib();  // check if we have looked it up before

  if (library == NULL) {
    char buffer[JVM_MAXPATHLEN];
    char ebuf[1024];
    const char *name = agent->name();
    const char *msg = "Could not find agent library ";

    if (agent->is_absolute_path()) {
      // Absolute path: load exactly what was given, no search.
      library = os::dll_load(name, ebuf, sizeof ebuf);
      if (library == NULL) {
        const char *sub_msg = " in absolute path, with error: ";
        size_t len = strlen(msg) + strlen(name) + strlen(sub_msg) + strlen(ebuf) + 1;
        char *buf = NEW_C_HEAP_ARRAY(char, len);
        jio_snprintf(buf, len, "%s%s%s%s", msg, name, sub_msg, ebuf);
        // If we can't find the agent, exit.
        vm_exit_during_initialization(buf, NULL);
        // NOTE(review): vm_exit_during_initialization is expected not to
        // return, which would make this free unreachable -- confirm.
        FREE_C_HEAP_ARRAY(char, buf);
      }
    } else {
      // Try to load the agent from the standard dll directory
      os::dll_build_name(buffer, sizeof(buffer), Arguments::get_dll_dir(), name);
      library = os::dll_load(buffer, ebuf, sizeof ebuf);
#ifdef KERNEL
      // Download instrument dll
      if (library == NULL && strcmp(name, "instrument") == 0) {
        // Kernel VM only: shell out to the DownloadManager to fetch the
        // client JVM bits, then retry the load.
        char *props = Arguments::get_kernel_properties();
        char *home  = Arguments::get_java_home();
        const char *fmt   = "%s/bin/java %s -Dkernel.background.download=false"
                      " sun.jkernel.DownloadManager -download client_jvm";
        size_t length = strlen(props) + strlen(home) + strlen(fmt) + 1;
        char *cmd = NEW_C_HEAP_ARRAY(char, length);
        jio_snprintf(cmd, length, fmt, home, props);
        int status = os::fork_and_exec(cmd);
        FreeHeap(props);
        if (status == -1) {
          warning(cmd);
          vm_exit_during_initialization("fork_and_exec failed: %s",
                                         strerror(errno));
        }
        FREE_C_HEAP_ARRAY(char, cmd);
        // when this comes back the instrument.dll should be where it belongs.
        library = os::dll_load(buffer, ebuf, sizeof ebuf);
      }
#endif // KERNEL
      if (library == NULL) { // Try the local directory
        char ns[1] = {0};
        os::dll_build_name(buffer, sizeof(buffer), ns, name);
        library = os::dll_load(buffer, ebuf, sizeof ebuf);
        if (library == NULL) {
          const char *sub_msg = " on the library path, with error: ";
          size_t len = strlen(msg) + strlen(name) + strlen(sub_msg) + strlen(ebuf) + 1;
          char *buf = NEW_C_HEAP_ARRAY(char, len);
          jio_snprintf(buf, len, "%s%s%s%s", msg, name, sub_msg, ebuf);
          // If we can't find the agent, exit.
          vm_exit_during_initialization(buf, NULL);
          FREE_C_HEAP_ARRAY(char, buf);
        }
      }
    }
    // Cache the handle so subsequent lookups skip the load.
    agent->set_os_lib(library);
  }

  // Find the OnLoad function.
  for (size_t symbol_index = 0; symbol_index < num_symbol_entries; symbol_index++) {
    on_load_entry = CAST_TO_FN_PTR(OnLoadEntry_t, os::dll_lookup(library, on_load_symbols[symbol_index]));
    if (on_load_entry != NULL) break;
  }
  return on_load_entry;
}
3604 3630
3605 3631 // Find the JVM_OnLoad entry point
3606 3632 static OnLoadEntry_t lookup_jvm_on_load(AgentLibrary* agent) {
3607 3633 const char *on_load_symbols[] = JVM_ONLOAD_SYMBOLS;
3608 3634 return lookup_on_load(agent, on_load_symbols, sizeof(on_load_symbols) / sizeof(char*));
3609 3635 }
3610 3636
3611 3637 // Find the Agent_OnLoad entry point
3612 3638 static OnLoadEntry_t lookup_agent_on_load(AgentLibrary* agent) {
3613 3639 const char *on_load_symbols[] = AGENT_ONLOAD_SYMBOLS;
3614 3640 return lookup_on_load(agent, on_load_symbols, sizeof(on_load_symbols) / sizeof(char*));
3615 3641 }
3616 3642
3617 3643 // For backwards compatibility with -Xrun
3618 3644 // Convert libraries with no JVM_OnLoad, but which have Agent_OnLoad to be
3619 3645 // treated like -agentpath:
3620 3646 // Must be called before agent libraries are created
3621 3647 void Threads::convert_vm_init_libraries_to_agents() {
3622 3648 AgentLibrary* agent;
3623 3649 AgentLibrary* next;
3624 3650
3625 3651 for (agent = Arguments::libraries(); agent != NULL; agent = next) {
3626 3652 next = agent->next(); // cache the next agent now as this agent may get moved off this list
3627 3653 OnLoadEntry_t on_load_entry = lookup_jvm_on_load(agent);
3628 3654
3629 3655 // If there is an JVM_OnLoad function it will get called later,
3630 3656 // otherwise see if there is an Agent_OnLoad
3631 3657 if (on_load_entry == NULL) {
3632 3658 on_load_entry = lookup_agent_on_load(agent);
3633 3659 if (on_load_entry != NULL) {
3634 3660 // switch it to the agent list -- so that Agent_OnLoad will be called,
3635 3661 // JVM_OnLoad won't be attempted and Agent_OnUnload will
3636 3662 Arguments::convert_library_to_agent(agent);
3637 3663 } else {
3638 3664 vm_exit_during_initialization("Could not find JVM_OnLoad or Agent_OnLoad function in the library", agent->name());
3639 3665 }
3640 3666 }
3641 3667 }
3642 3668 }
3643 3669
3644 3670 // Create agents for -agentlib: -agentpath: and converted -Xrun
3645 3671 // Invokes Agent_OnLoad
3646 3672 // Called very early -- before JavaThreads exist
3647 3673 void Threads::create_vm_init_agents() {
3648 3674 extern struct JavaVM_ main_vm;
3649 3675 AgentLibrary* agent;
3650 3676
3651 3677 JvmtiExport::enter_onload_phase();
3652 3678 for (agent = Arguments::agents(); agent != NULL; agent = agent->next()) {
3653 3679 OnLoadEntry_t on_load_entry = lookup_agent_on_load(agent);
3654 3680
3655 3681 if (on_load_entry != NULL) {
3656 3682 // Invoke the Agent_OnLoad function
3657 3683 jint err = (*on_load_entry)(&main_vm, agent->options(), NULL);
3658 3684 if (err != JNI_OK) {
3659 3685 vm_exit_during_initialization("agent library failed to init", agent->name());
3660 3686 }
3661 3687 } else {
3662 3688 vm_exit_during_initialization("Could not find Agent_OnLoad function in the agent library", agent->name());
3663 3689 }
3664 3690 }
3665 3691 JvmtiExport::enter_primordial_phase();
3666 3692 }
3667 3693
3668 3694 extern "C" {
3669 3695 typedef void (JNICALL *Agent_OnUnload_t)(JavaVM *);
3670 3696 }
3671 3697
3672 3698 void Threads::shutdown_vm_agents() {
3673 3699 // Send any Agent_OnUnload notifications
3674 3700 const char *on_unload_symbols[] = AGENT_ONUNLOAD_SYMBOLS;
3675 3701 extern struct JavaVM_ main_vm;
3676 3702 for (AgentLibrary* agent = Arguments::agents(); agent != NULL; agent = agent->next()) {
3677 3703
3678 3704 // Find the Agent_OnUnload function.
3679 3705 for (uint symbol_index = 0; symbol_index < ARRAY_SIZE(on_unload_symbols); symbol_index++) {
3680 3706 Agent_OnUnload_t unload_entry = CAST_TO_FN_PTR(Agent_OnUnload_t,
3681 3707 os::dll_lookup(agent->os_lib(), on_unload_symbols[symbol_index]));
3682 3708
3683 3709 // Invoke the Agent_OnUnload function
3684 3710 if (unload_entry != NULL) {
3685 3711 JavaThread* thread = JavaThread::current();
3686 3712 ThreadToNativeFromVM ttn(thread);
3687 3713 HandleMark hm(thread);
3688 3714 (*unload_entry)(&main_vm);
3689 3715 break;
3690 3716 }
3691 3717 }
3692 3718 }
3693 3719 }
3694 3720
3695 3721 // Called for after the VM is initialized for -Xrun libraries which have not been converted to agent libraries
3696 3722 // Invokes JVM_OnLoad
3697 3723 void Threads::create_vm_init_libraries() {
3698 3724 extern struct JavaVM_ main_vm;
3699 3725 AgentLibrary* agent;
3700 3726
3701 3727 for (agent = Arguments::libraries(); agent != NULL; agent = agent->next()) {
3702 3728 OnLoadEntry_t on_load_entry = lookup_jvm_on_load(agent);
3703 3729
3704 3730 if (on_load_entry != NULL) {
3705 3731 // Invoke the JVM_OnLoad function
3706 3732 JavaThread* thread = JavaThread::current();
3707 3733 ThreadToNativeFromVM ttn(thread);
3708 3734 HandleMark hm(thread);
3709 3735 jint err = (*on_load_entry)(&main_vm, agent->options(), NULL);
3710 3736 if (err != JNI_OK) {
3711 3737 vm_exit_during_initialization("-Xrun library failed to init", agent->name());
3712 3738 }
3713 3739 } else {
3714 3740 vm_exit_during_initialization("Could not find JVM_OnLoad function in -Xrun library", agent->name());
3715 3741 }
3716 3742 }
3717 3743 }
3718 3744
3719 3745 // Last thread running calls java.lang.Shutdown.shutdown()
3720 3746 void JavaThread::invoke_shutdown_hooks() {
3721 3747 HandleMark hm(this);
3722 3748
3723 3749 // We could get here with a pending exception, if so clear it now.
3724 3750 if (this->has_pending_exception()) {
3725 3751 this->clear_pending_exception();
3726 3752 }
3727 3753
3728 3754 EXCEPTION_MARK;
3729 3755 klassOop k =
3730 3756 SystemDictionary::resolve_or_null(vmSymbols::java_lang_Shutdown(),
3731 3757 THREAD);
3732 3758 if (k != NULL) {
3733 3759 // SystemDictionary::resolve_or_null will return null if there was
3734 3760 // an exception. If we cannot load the Shutdown class, just don't
3735 3761 // call Shutdown.shutdown() at all. This will mean the shutdown hooks
3736 3762 // and finalizers (if runFinalizersOnExit is set) won't be run.
3737 3763 // Note that if a shutdown hook was registered or runFinalizersOnExit
3738 3764 // was called, the Shutdown class would have already been loaded
3739 3765 // (Runtime.addShutdownHook and runFinalizersOnExit will load it).
3740 3766 instanceKlassHandle shutdown_klass (THREAD, k);
3741 3767 JavaValue result(T_VOID);
3742 3768 JavaCalls::call_static(&result,
3743 3769 shutdown_klass,
3744 3770 vmSymbols::shutdown_method_name(),
3745 3771 vmSymbols::void_method_signature(),
3746 3772 THREAD);
3747 3773 }
3748 3774 CLEAR_PENDING_EXCEPTION;
3749 3775 }
3750 3776
3751 3777 // Threads::destroy_vm() is normally called from jni_DestroyJavaVM() when
3752 3778 // the program falls off the end of main(). Another VM exit path is through
3753 3779 // vm_exit() when the program calls System.exit() to return a value or when
3754 3780 // there is a serious error in VM. The two shutdown paths are not exactly
3755 3781 // the same, but they share Shutdown.shutdown() at Java level and before_exit()
3756 3782 // and VM_Exit op at VM level.
3757 3783 //
3758 3784 // Shutdown sequence:
3759 3785 // + Wait until we are the last non-daemon thread to execute
3760 3786 //   + Wait until we are the last non-daemon thread to execute
3761 3787 // + Call java.lang.Shutdown.shutdown(), which will invoke Java level
3762 3788 // shutdown hooks, run finalizers if finalization-on-exit
3763 3789 // + Call before_exit(), prepare for VM exit
3764 3790 // > run VM level shutdown hooks (they are registered through JVM_OnExit(),
3765 3791 // currently the only user of this mechanism is File.deleteOnExit())
3766 3792 // > stop flat profiler, StatSampler, watcher thread, CMS threads,
3767 3793 // post thread end and vm death events to JVMTI,
3768 3794 // stop signal thread
3769 3795 // + Call JavaThread::exit(), it will:
3770 3796 // > release JNI handle blocks, remove stack guard pages
3771 3797 // > remove this thread from Threads list
3772 3798 // <-- no more Java code from this thread after this point -->
3773 3799 // + Stop VM thread, it will bring the remaining VM to a safepoint and stop
3774 3800 // the compiler threads at safepoint
3775 3801 // <-- do not use anything that could get blocked by Safepoint -->
3776 3802 // + Disable tracing at JNI/JVM barriers
3777 3803 // + Set _vm_exited flag for threads that are still running native code
3778 3804 // + Delete this thread
3779 3805 // + Call exit_globals()
3780 3806 // > deletes tty
3781 3807 // > deletes PerfMemory resources
3782 3808 // + Return to caller
3783 3809
// Tear down the VM from the last user thread: wait until this is the last
// non-daemon thread, run the Java-level shutdown hooks (or finalizers on
// 1.2), call before_exit(), stop the VM thread at a terminal safepoint,
// and delete this thread. See the "Shutdown sequence" comment above for
// the full ordering. Always returns true.
bool Threads::destroy_vm() {
  JavaThread* thread = JavaThread::current();

  // Wait until we are the last non-daemon thread to execute
  { MutexLocker nu(Threads_lock);
    while (Threads::number_of_non_daemon_threads() > 1 )
      // This wait should make safepoint checks, wait without a timeout,
      // and wait as a suspend-equivalent condition.
      //
      // Note: If the FlatProfiler is running and this thread is waiting
      // for another non-daemon thread to finish, then the FlatProfiler
      // is waiting for the external suspend request on this thread to
      // complete. wait_for_ext_suspend_completion() will eventually
      // timeout, but that takes time. Making this wait a suspend-
      // equivalent condition solves that timeout problem.
      //
      Threads_lock->wait(!Mutex::_no_safepoint_check_flag, 0,
                         Mutex::_as_suspend_equivalent_flag);
  }

  // Hang forever on exit if we are reporting an error.
  if (ShowMessageBoxOnError && is_error_reported()) {
    os::infinite_sleep();
  }
  os::wait_for_keypress_at_exit();

  if (JDK_Version::is_jdk12x_version()) {
    // We are the last thread running, so check if finalizers should be run.
    // For 1.3 or later this is done in thread->invoke_shutdown_hooks()
    HandleMark rm(thread);
    Universe::run_finalizers_on_exit();
  } else {
    // run Java level shutdown hooks
    thread->invoke_shutdown_hooks();
  }

  before_exit(thread);

  // Releases JNI handle blocks, removes stack guard pages, and removes this
  // thread from the Threads list; no more Java code runs on this thread
  // after this call.
  thread->exit(true);

  // Stop VM thread.
  {
    // 4945125 The vm thread comes to a safepoint during exit.
    // GC vm_operations can get caught at the safepoint, and the
    // heap is unparseable if they are caught. Grab the Heap_lock
    // to prevent this. The GC vm_operations will not be able to
    // queue until after the vm thread is dead.
    // After this point, we'll never emerge out of the safepoint before
    // the VM exits, so concurrent GC threads do not need to be explicitly
    // stopped; they remain inactive until the process exits.
    // Note: some concurrent G1 threads may be running during a safepoint,
    // but these will not be accessing the heap, just some G1-specific side
    // data structures that are not accessed by any other threads but them
    // after this point in a terminal safepoint.

    MutexLocker ml(Heap_lock);

    VMThread::wait_for_vm_thread_exit();
    assert(SafepointSynchronize::is_at_safepoint(), "VM thread should exit at Safepoint");
    VMThread::destroy();
  }

  // clean up ideal graph printers
#if defined(COMPILER2) && !defined(PRODUCT)
  IdealGraphPrinter::clean_up();
#endif

  // Now, all Java threads are gone except daemon threads. Daemon threads
  // running Java code or in VM are stopped by the Safepoint. However,
  // daemon threads executing native code are still running. But they
  // will be stopped at native=>Java/VM barriers. Note that we can't
  // simply kill or suspend them, as it is inherently deadlock-prone.

#ifndef PRODUCT
  // disable function tracing at JNI/JVM barriers
  TraceJNICalls = false;
  TraceJVMCalls = false;
  TraceRuntimeCalls = false;
#endif

  VM_Exit::set_vm_exited();

  notify_vm_shutdown();

  // This thread's JavaThread structure; it was removed from the Threads
  // list by thread->exit(true) above, so deleting it here is safe.
  delete thread;

  // exit_globals() will delete tty
  exit_globals();

  return true;
}
3875 3901
3876 3902
3877 3903 jboolean Threads::is_supported_jni_version_including_1_1(jint version) {
3878 3904 if (version == JNI_VERSION_1_1) return JNI_TRUE;
3879 3905 return is_supported_jni_version(version);
3880 3906 }
3881 3907
3882 3908
3883 3909 jboolean Threads::is_supported_jni_version(jint version) {
3884 3910 if (version == JNI_VERSION_1_2) return JNI_TRUE;
3885 3911 if (version == JNI_VERSION_1_4) return JNI_TRUE;
3886 3912 if (version == JNI_VERSION_1_6) return JNI_TRUE;
3887 3913 return JNI_FALSE;
3888 3914 }
3889 3915
3890 3916
3891 3917 void Threads::add(JavaThread* p, bool force_daemon) {
3892 3918 // The threads lock must be owned at this point
3893 3919 assert_locked_or_safepoint(Threads_lock);
3894 3920
3895 3921 // See the comment for this method in thread.hpp for its purpose and
3896 3922 // why it is called here.
3897 3923 p->initialize_queues();
3898 3924 p->set_next(_thread_list);
3899 3925 _thread_list = p;
3900 3926 _number_of_threads++;
3901 3927 oop threadObj = p->threadObj();
3902 3928 bool daemon = true;
3903 3929 // Bootstrapping problem: threadObj can be null for initial
3904 3930 // JavaThread (or for threads attached via JNI)
3905 3931 if ((!force_daemon) && (threadObj == NULL || !java_lang_Thread::is_daemon(threadObj))) {
3906 3932 _number_of_non_daemon_threads++;
3907 3933 daemon = false;
3908 3934 }
3909 3935
3910 3936 ThreadService::add_thread(p, daemon);
3911 3937
3912 3938 // Possible GC point.
3913 3939 Events::log(p, "Thread added: " INTPTR_FORMAT, p);
3914 3940 }
3915 3941
// Unlink p from the Threads list and update the daemon/non-daemon counts;
// wakes a waiter in destroy_vm() when only one non-daemon thread remains.
void Threads::remove(JavaThread* p) {
  // Extra scope needed for Thread_lock, so we can check
  // that we do not remove thread without safepoint code notice
  { MutexLocker ml(Threads_lock);

    assert(includes(p), "p must be present");

    // Walk the singly-linked list to find p's predecessor.
    JavaThread* current = _thread_list;
    JavaThread* prev = NULL;

    while (current != p) {
      prev = current;
      current = current->next();
    }

    if (prev) {
      prev->set_next(current->next());
    } else {
      // p is at the head of the list.
      _thread_list = p->next();
    }
    _number_of_threads--;
    oop threadObj = p->threadObj();
    bool daemon = true;
    if (threadObj == NULL || !java_lang_Thread::is_daemon(threadObj)) {
      _number_of_non_daemon_threads--;
      daemon = false;

      // Only one thread left, do a notify on the Threads_lock so a thread waiting
      // on destroy_vm will wake up.
      if (number_of_non_daemon_threads() == 1)
        Threads_lock->notify_all();
    }
    ThreadService::remove_thread(p, daemon);

    // Make sure that safepoint code disregards this thread. This is needed since
    // the thread might mess around with locks after this point. This can cause it
    // to do callbacks into the safepoint code. However, the safepoint code is not aware
    // of this thread since it is removed from the queue.
    p->set_terminated_value();
  } // unlock Threads_lock

  // Since Events::log uses a lock, we grab it outside the Threads_lock
  Events::log(p, "Thread exited: " INTPTR_FORMAT, p);
}
3960 3986
3961 3987 // Threads_lock must be held when this is called (or must be called during a safepoint)
3962 3988 bool Threads::includes(JavaThread* p) {
3963 3989 assert(Threads_lock->is_locked(), "sanity check");
3964 3990 ALL_JAVA_THREADS(q) {
3965 3991 if (q == p ) {
3966 3992 return true;
3967 3993 }
3968 3994 }
3969 3995 return false;
3970 3996 }
3971 3997
3972 3998 // Operations on the Threads list for GC. These are not explicitly locked,
3973 3999 // but the garbage collector must provide a safe context for them to run.
3974 4000 // In particular, these things should never be called when the Threads_lock
3975 4001 // is held by some other thread. (Note: the Safepoint abstraction also
3976 4002 // uses the Threads_lock to guarantee this property. It also makes sure that
3977 4003 // all threads get blocked when exiting or starting).
3978 4004
// Apply the oop closure 'f' (and code-blob closure 'cf') to the roots of
// every Java thread, then to the VM thread. Per the comment above, the GC
// must provide a safe context: no other thread may hold the Threads_lock.
void Threads::oops_do(OopClosure* f, CodeBlobClosure* cf) {
  ALL_JAVA_THREADS(p) {
    p->oops_do(f, cf);
  }
  VMThread::vm_thread()->oops_do(f, cf);
}
3985 4011
3986 4012 void Threads::possibly_parallel_oops_do(OopClosure* f, CodeBlobClosure* cf) {
3987 4013 // Introduce a mechanism allowing parallel threads to claim threads as
3988 4014 // root groups. Overhead should be small enough to use all the time,
3989 4015 // even in sequential code.
3990 4016 SharedHeap* sh = SharedHeap::heap();
3991 4017 // Cannot yet substitute active_workers for n_par_threads
3992 4018 // because of G1CollectedHeap::verify() use of
3993 4019 // SharedHeap::process_strong_roots(). n_par_threads == 0 will
3994 4020 // turn off parallelism in process_strong_roots while active_workers
3995 4021 // is being used for parallelism elsewhere.
3996 4022 bool is_par = sh->n_par_threads() > 0;
3997 4023 assert(!is_par ||
3998 4024 (SharedHeap::heap()->n_par_threads() ==
3999 4025 SharedHeap::heap()->workers()->active_workers()), "Mismatch");
4000 4026 int cp = SharedHeap::heap()->strong_roots_parity();
4001 4027 ALL_JAVA_THREADS(p) {
4002 4028 if (p->claim_oops_do(is_par, cp)) {
4003 4029 p->oops_do(f, cf);
4004 4030 }
4005 4031 }
4006 4032 VMThread* vmt = VMThread::vm_thread();
4007 4033 if (vmt->claim_oops_do(is_par, cp)) {
4008 4034 vmt->oops_do(f, cf);
4009 4035 }
4010 4036 }
4011 4037
4012 4038 #ifndef SERIALGC
// Used by ParallelScavenge
// Enqueue one ThreadRootsTask per Java thread, plus one for the VM thread,
// so GC workers can scan thread roots in parallel.
void Threads::create_thread_roots_tasks(GCTaskQueue* q) {
  ALL_JAVA_THREADS(p) {
    q->enqueue(new ThreadRootsTask(p));
  }
  q->enqueue(new ThreadRootsTask(VMThread::vm_thread()));
}
4020 4046
// Used by Parallel Old
// Enqueue one ThreadRootsMarkingTask per Java thread, plus one for the VM
// thread, for the parallel marking phase.
void Threads::create_thread_roots_marking_tasks(GCTaskQueue* q) {
  ALL_JAVA_THREADS(p) {
    q->enqueue(new ThreadRootsMarkingTask(p));
  }
  q->enqueue(new ThreadRootsMarkingTask(VMThread::vm_thread()));
}
4028 4054 #endif // SERIALGC
4029 4055
// Apply 'cf' to the nmethods referenced from every Java thread's stack,
// then from the VM thread.
void Threads::nmethods_do(CodeBlobClosure* cf) {
  ALL_JAVA_THREADS(p) {
    p->nmethods_do(cf);
  }
  VMThread::vm_thread()->nmethods_do(cf);
}
4036 4062
// Run each Java thread's per-thread GC epilogue after a collection.
void Threads::gc_epilogue() {
  ALL_JAVA_THREADS(p) {
    p->gc_epilogue();
  }
}
4042 4068
// Run each Java thread's per-thread GC prologue before a collection.
void Threads::gc_prologue() {
  ALL_JAVA_THREADS(p) {
    p->gc_prologue();
  }
}
4048 4074
// Deoptimize, on every Java thread, frames whose nmethods have been marked
// for deoptimization.
void Threads::deoptimized_wrt_marked_nmethods() {
  ALL_JAVA_THREADS(p) {
    p->deoptimized_wrt_marked_nmethods();
  }
}
4054 4080
4055 4081
4056 4082 // Get count Java threads that are waiting to enter the specified monitor.
4057 4083 GrowableArray<JavaThread*>* Threads::get_pending_threads(int count,
4058 4084 address monitor, bool doLock) {
4059 4085 assert(doLock || SafepointSynchronize::is_at_safepoint(),
4060 4086 "must grab Threads_lock or be at safepoint");
4061 4087 GrowableArray<JavaThread*>* result = new GrowableArray<JavaThread*>(count);
4062 4088
4063 4089 int i = 0;
4064 4090 {
4065 4091 MutexLockerEx ml(doLock ? Threads_lock : NULL);
4066 4092 ALL_JAVA_THREADS(p) {
4067 4093 if (p->is_Compiler_thread()) continue;
4068 4094
4069 4095 address pending = (address)p->current_pending_monitor();
4070 4096 if (pending == monitor) { // found a match
4071 4097 if (i < count) result->append(p); // save the first count matches
4072 4098 i++;
4073 4099 }
4074 4100 }
4075 4101 }
4076 4102 return result;
4077 4103 }
4078 4104
4079 4105
// Map a monitor owner cookie to the owning JavaThread. 'owner' is either
// the address of a JavaThread (heavyweight/inflated case) or a stack
// address inside the owning thread (lightweight lock-word case).
// Returns NULL when 'owner' is NULL.
JavaThread *Threads::owning_thread_from_monitor_owner(address owner, bool doLock) {
  assert(doLock ||
         Threads_lock->owned_by_self() ||
         SafepointSynchronize::is_at_safepoint(),
         "must grab Threads_lock or be at safepoint");

  // NULL owner means not locked so we can skip the search
  if (owner == NULL) return NULL;

  {
    MutexLockerEx ml(doLock ? Threads_lock : NULL);
    ALL_JAVA_THREADS(p) {
      // first, see if owner is the address of a Java thread
      if (owner == (address)p) return p;
    }
  }
  // With UseHeavyMonitors, every owner cookie should have been a thread
  // address, so reaching here means the owner was not found.
  assert(UseHeavyMonitors == false, "Did not find owning Java thread with UseHeavyMonitors enabled");
  if (UseHeavyMonitors) return NULL;

  //
  // If we didn't find a matching Java thread and we didn't force use of
  // heavyweight monitors, then the owner is the stack address of the
  // Lock Word in the owning Java thread's stack.
  //
  // NOTE(review): when doLock is true the Threads_lock is released between
  // the two scans, so the thread set can change in between -- confirm that
  // callers tolerate this.
  JavaThread* the_owner = NULL;
  {
    MutexLockerEx ml(doLock ? Threads_lock : NULL);
    ALL_JAVA_THREADS(q) {
      if (q->is_lock_owned(owner)) {
        the_owner = q;
        break;
      }
    }
  }
  assert(the_owner != NULL, "Did not find owning Java thread for lock word address");
  return the_owner;
}
4117 4143
// Threads::print_on() is called at safepoint by VM_PrintThreads operation.
// Writes a full thread dump to 'st': timestamp, VM banner, every Java
// thread (optionally with its stack in internal or Java format), the VM
// thread, GC threads, the watcher thread, and the compiler threads.
// When 'print_concurrent_locks' is set, also dumps the j.u.c. locks
// owned by each thread.
void Threads::print_on(outputStream* st, bool print_stacks, bool internal_format, bool print_concurrent_locks) {
  char buf[32];
  st->print_cr(os::local_time_string(buf, sizeof(buf)));

  st->print_cr("Full thread dump %s (%s %s):",
                Abstract_VM_Version::vm_name(),
                Abstract_VM_Version::vm_release(),
                Abstract_VM_Version::vm_info_string()
               );
  st->cr();

#ifndef SERIALGC
  // Dump concurrent locks
  ConcurrentLocksDump concurrent_locks;
  if (print_concurrent_locks) {
    concurrent_locks.dump_at_safepoint();
  }
#endif // SERIALGC

  ALL_JAVA_THREADS(p) {
    // ResourceMark per thread: stack printing allocates in the resource area.
    ResourceMark rm;
    p->print_on(st);
    if (print_stacks) {
      if (internal_format) {
        p->trace_stack();
      } else {
        p->print_stack_on(st);
      }
    }
    st->cr();
#ifndef SERIALGC
    if (print_concurrent_locks) {
      concurrent_locks.print_locks_on(p, st);
    }
#endif // SERIALGC
  }

  VMThread::vm_thread()->print_on(st);
  st->cr();
  Universe::heap()->print_gc_threads_on(st);
  WatcherThread* wt = WatcherThread::watcher_thread();
  if (wt != NULL) wt->print_on(st);
  st->cr();
  CompileBroker::print_compiler_threads_on(st);
  st->flush();
}
4165 4191
4166 4192 // Threads::print_on_error() is called by fatal error handler. It's possible
4167 4193 // that VM is not at safepoint and/or current thread is inside signal handler.
4168 4194 // Don't print stack trace, as the stack may not be walkable. Don't allocate
4169 4195 // memory (even in resource area), it might deadlock the error handler.
4170 4196 void Threads::print_on_error(outputStream* st, Thread* current, char* buf, int buflen) {
4171 4197 bool found_current = false;
4172 4198 st->print_cr("Java Threads: ( => current thread )");
4173 4199 ALL_JAVA_THREADS(thread) {
4174 4200 bool is_current = (current == thread);
4175 4201 found_current = found_current || is_current;
4176 4202
4177 4203 st->print("%s", is_current ? "=>" : " ");
4178 4204
4179 4205 st->print(PTR_FORMAT, thread);
4180 4206 st->print(" ");
4181 4207 thread->print_on_error(st, buf, buflen);
4182 4208 st->cr();
4183 4209 }
4184 4210 st->cr();
4185 4211
4186 4212 st->print_cr("Other Threads:");
4187 4213 if (VMThread::vm_thread()) {
4188 4214 bool is_current = (current == VMThread::vm_thread());
4189 4215 found_current = found_current || is_current;
4190 4216 st->print("%s", current == VMThread::vm_thread() ? "=>" : " ");
4191 4217
4192 4218 st->print(PTR_FORMAT, VMThread::vm_thread());
4193 4219 st->print(" ");
4194 4220 VMThread::vm_thread()->print_on_error(st, buf, buflen);
4195 4221 st->cr();
4196 4222 }
4197 4223 WatcherThread* wt = WatcherThread::watcher_thread();
4198 4224 if (wt != NULL) {
4199 4225 bool is_current = (current == wt);
4200 4226 found_current = found_current || is_current;
4201 4227 st->print("%s", is_current ? "=>" : " ");
4202 4228
4203 4229 st->print(PTR_FORMAT, wt);
4204 4230 st->print(" ");
4205 4231 wt->print_on_error(st, buf, buflen);
4206 4232 st->cr();
4207 4233 }
4208 4234 if (!found_current) {
4209 4235 st->cr();
4210 4236 st->print("=>" PTR_FORMAT " (exited) ", current);
4211 4237 current->print_on_error(st, buf, buflen);
4212 4238 st->cr();
4213 4239 }
4214 4240 }
4215 4241
4216 4242 // Internal SpinLock and Mutex
4217 4243 // Based on ParkEvent
4218 4244
4219 4245 // Ad-hoc mutual exclusion primitives: SpinLock and Mux
4220 4246 //
4221 4247 // We employ SpinLocks _only for low-contention, fixed-length
4222 4248 // short-duration critical sections where we're concerned
4223 4249 // about native mutex_t or HotSpot Mutex:: latency.
4224 4250 // The mux construct provides a spin-then-block mutual exclusion
4225 4251 // mechanism.
4226 4252 //
4227 4253 // Testing has shown that contention on the ListLock guarding gFreeList
4228 4254 // is common. If we implement ListLock as a simple SpinLock it's common
4229 4255 // for the JVM to devolve to yielding with little progress. This is true
4230 4256 // despite the fact that the critical sections protected by ListLock are
4231 4257 // extremely short.
4232 4258 //
4233 4259 // TODO-FIXME: ListLock should be of type SpinLock.
4234 4260 // We should make this a 1st-class type, integrated into the lock
4235 4261 // hierarchy as leaf-locks. Critically, the SpinLock structure
4236 4262 // should have sufficient padding to avoid false-sharing and excessive
4237 4263 // cache-coherency traffic.
4238 4264
4239 4265
4240 4266 typedef volatile int SpinLockT ;
4241 4267
// Acquire the spin lock at *adr (0 = free, 1 = held). Fast path is a
// single CAS. Under contention: spin with SpinPause(), yield every 0x1000
// iterations (or always on uniprocessors), and after more than 5 yields
// park for 1ms to stop burning CPU. 'LockName' is currently unused.
void Thread::SpinAcquire (volatile int * adr, const char * LockName) {
  if (Atomic::cmpxchg (1, adr, 0) == 0) {
    return ;   // normal fast-path return
  }

  // Slow-path : We've encountered contention -- Spin/Yield/Block strategy.
  TEVENT (SpinAcquire - ctx) ;
  int ctr = 0 ;
  int Yields = 0 ;
  for (;;) {
    while (*adr != 0) {
      ++ctr ;
      if ((ctr & 0xFFF) == 0 || !os::is_MP()) {
        if (Yields > 5) {
          // Consider using a simple NakedSleep() instead.
          // Then SpinAcquire could be called by non-JVM threads
          Thread::current()->_ParkEvent->park(1) ;
        } else {
          os::NakedYield() ;
          ++Yields ;
        }
      } else {
        SpinPause() ;
      }
    }
    // Lock observed free -- race to grab it with a CAS.
    if (Atomic::cmpxchg (1, adr, 0) == 0) return ;
  }
}
4270 4296
// Release the spin lock at *adr: fence, then store 0.
void Thread::SpinRelease (volatile int * adr) {
  assert (*adr != 0, "invariant") ;
  OrderAccess::fence() ;   // guarantee at least release consistency.
  // Roach-motel semantics.
  // It's safe if subsequent LDs and STs float "up" into the critical section,
  // but prior LDs and STs within the critical section can't be allowed
  // to reorder or float past the ST that releases the lock.
  *adr = 0 ;
}
4280 4306
4281 4307 // muxAcquire and muxRelease:
4282 4308 //
4283 4309 // * muxAcquire and muxRelease support a single-word lock-word construct.
4284 4310 // The LSB of the word is set IFF the lock is held.
4285 4311 // The remainder of the word points to the head of a singly-linked list
4286 4312 // of threads blocked on the lock.
4287 4313 //
4288 4314 // * The current implementation of muxAcquire-muxRelease uses its own
4289 4315 // dedicated Thread._MuxEvent instance. If we're interested in
4290 4316 // minimizing the peak number of extant ParkEvent instances then
4291 4317 // we could eliminate _MuxEvent and "borrow" _ParkEvent as long
4292 4318 // as certain invariants were satisfied. Specifically, care would need
4293 4319 // to be taken with regards to consuming unpark() "permits".
4294 4320 // A safe rule of thumb is that a thread would never call muxAcquire()
4295 4321 // if it's enqueued (cxq, EntryList, WaitList, etc) and will subsequently
4296 4322 // park(). Otherwise the _ParkEvent park() operation in muxAcquire() could
4297 4323 // consume an unpark() permit intended for monitorenter, for instance.
4298 4324 // One way around this would be to widen the restricted-range semaphore
4299 4325 // implemented in park(). Another alternative would be to provide
4300 4326 // multiple instances of the PlatformEvent() for each thread. One
4301 4327 // instance would be dedicated to muxAcquire-muxRelease, for instance.
4302 4328 //
4303 4329 // * Usage:
4304 4330 // -- Only as leaf locks
4305 4331 // -- for short-term locking only as muxAcquire does not perform
4306 4332 // thread state transitions.
4307 4333 //
4308 4334 // Alternatives:
4309 4335 // * We could implement muxAcquire and muxRelease with MCS or CLH locks
4310 4336 // but with parking or spin-then-park instead of pure spinning.
4311 4337 // * Use Taura-Oyama-Yonenzawa locks.
4312 4338 // * It's possible to construct a 1-0 lock if we encode the lockword as
4313 4339 // (List,LockByte). Acquire will CAS the full lockword while Release
4314 4340 // will STB 0 into the LockByte. The 1-0 scheme admits stranding, so
4315 4341 // acquiring threads use timers (ParkTimed) to detect and recover from
4316 4342 // the stranding window. Thread/Node structures must be aligned on 256-byte
4317 4343 // boundaries by using placement-new.
4318 4344 // * Augment MCS with advisory back-link fields maintained with CAS().
4319 4345 // Pictorially: LockWord -> T1 <-> T2 <-> T3 <-> ... <-> Tn <-> Owner.
4320 4346 // The validity of the backlinks must be ratified before we trust the value.
4321 4347 // If the backlinks are invalid the exiting thread must back-track through the
4322 4348 // the forward links, which are always trustworthy.
4323 4349 // * Add a successor indication. The LockWord is currently encoded as
4324 4350 // (List, LOCKBIT:1). We could also add a SUCCBIT or an explicit _succ variable
4325 4351 // to provide the usual futile-wakeup optimization.
4326 4352 // See RTStt for details.
4327 4353 // * Consider schedctl.sc_nopreempt to cover the critical section.
4328 4354 //
4329 4355
4330 4356
4331 4357 typedef volatile intptr_t MutexT ; // Mux Lock-word
4332 4358 enum MuxBits { LOCKBIT = 1 } ;
4333 4359
// Acquire the mux at *Lock. The lock word's LSB (LOCKBIT) is the held bit;
// the remaining bits are the head of a list of parked waiters (see the
// commentary above). Strategy: CAS fast path, bounded spin, then push this
// thread's _MuxEvent onto the waiter list and park until a releaser pops
// us off (clearing OnList). 'LockName' is currently unused.
void Thread::muxAcquire (volatile intptr_t * Lock, const char * LockName) {
  intptr_t w = Atomic::cmpxchg_ptr (LOCKBIT, Lock, 0) ;
  if (w == 0) return ;
  if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
    return ;
  }

  TEVENT (muxAcquire - Contention) ;
  ParkEvent * const Self = Thread::current()->_MuxEvent ;
  assert ((intptr_t(Self) & LOCKBIT) == 0, "invariant") ;
  for (;;) {
    int its = (os::is_MP() ? 100 : 0) + 1 ;

    // Optional spin phase: spin-then-park strategy
    while (--its >= 0) {
      w = *Lock ;
      if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
        return ;
      }
    }

    Self->reset() ;
    Self->OnList = intptr_t(Lock) ;
    // The following fence() isn't _strictly necessary as the subsequent
    // CAS() both serializes execution and ratifies the fetched *Lock value.
    OrderAccess::fence();
    for (;;) {
      w = *Lock ;
      if ((w & LOCKBIT) == 0) {
        if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
          Self->OnList = 0 ;   // hygiene - allows stronger asserts
          return ;
        }
        continue ;   // Interference -- *Lock changed -- Just retry
      }
      assert (w & LOCKBIT, "invariant") ;
      // Lock is held: push Self onto the head of the waiter list via CAS.
      Self->ListNext = (ParkEvent *) (w & ~LOCKBIT );
      if (Atomic::cmpxchg_ptr (intptr_t(Self)|LOCKBIT, Lock, w) == w) break ;
    }

    // Park until muxRelease() pops us off the list and clears OnList.
    while (Self->OnList != 0) {
      Self->park() ;
    }
  }
}
4379 4405
// Variant of muxAcquire() that parks on a caller-supplied ParkEvent 'ev'
// instead of the thread's _MuxEvent. If 'ev' is NULL a ParkEvent is
// allocated here and released once the lock is acquired (ReleaseAfter).
void Thread::muxAcquireW (volatile intptr_t * Lock, ParkEvent * ev) {
  intptr_t w = Atomic::cmpxchg_ptr (LOCKBIT, Lock, 0) ;
  if (w == 0) return ;
  if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
    return ;
  }

  TEVENT (muxAcquire - Contention) ;
  ParkEvent * ReleaseAfter = NULL ;
  if (ev == NULL) {
    ev = ReleaseAfter = ParkEvent::Allocate (NULL) ;
  }
  assert ((intptr_t(ev) & LOCKBIT) == 0, "invariant") ;
  for (;;) {
    guarantee (ev->OnList == 0, "invariant") ;
    int its = (os::is_MP() ? 100 : 0) + 1 ;

    // Optional spin phase: spin-then-park strategy
    while (--its >= 0) {
      w = *Lock ;
      if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
        if (ReleaseAfter != NULL) {
          ParkEvent::Release (ReleaseAfter) ;
        }
        return ;
      }
    }

    ev->reset() ;
    ev->OnList = intptr_t(Lock) ;
    // The following fence() isn't _strictly necessary as the subsequent
    // CAS() both serializes execution and ratifies the fetched *Lock value.
    OrderAccess::fence();
    for (;;) {
      w = *Lock ;
      if ((w & LOCKBIT) == 0) {
        if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
          ev->OnList = 0 ;
          // We call ::Release while holding the outer lock, thus
          // artificially lengthening the critical section.
          // Consider deferring the ::Release() until the subsequent unlock(),
          // after we've dropped the outer lock.
          if (ReleaseAfter != NULL) {
            ParkEvent::Release (ReleaseAfter) ;
          }
          return ;
        }
        continue ;   // Interference -- *Lock changed -- Just retry
      }
      assert (w & LOCKBIT, "invariant") ;
      // Lock is held: push 'ev' onto the head of the waiter list via CAS.
      ev->ListNext = (ParkEvent *) (w & ~LOCKBIT );
      if (Atomic::cmpxchg_ptr (intptr_t(ev)|LOCKBIT, Lock, w) == w) break ;
    }

    // Park until muxRelease() pops 'ev' off the list and clears OnList.
    while (ev->OnList != 0) {
      ev->park() ;
    }
  }
}
4439 4465
4440 4466 // Release() must extract a successor from the list and then wake that thread.
4441 4467 // It can "pop" the front of the list or use a detach-modify-reattach (DMR) scheme
4442 4468 // similar to that used by ParkEvent::Allocate() and ::Release(). DMR-based
4443 4469 // Release() would :
4444 4470 // (A) CAS() or swap() null to *Lock, releasing the lock and detaching the list.
4445 4471 // (B) Extract a successor from the private list "in-hand"
4446 4472 // (C) attempt to CAS() the residual back into *Lock over null.
4447 4473 // If any threads arrived in the interim, the CAS() would fail.
4448 4474 // In that case Release() would detach the RATs, re-merge the list in-hand
4449 4475 // with the RATs and repeat as needed. Alternately, Release() might
4450 4476 // detach and extract a successor, but then pass the residual list to the wakee.
4451 4477 // The wakee would be responsible for reattaching and remerging before it
4452 4478 // competed for the lock.
4453 4479 //
4454 4480 // Both "pop" and DMR are immune from ABA corruption -- there can be
4455 4481 // multiple concurrent pushers, but only one popper or detacher.
4456 4482 // This implementation pops from the head of the list. This is unfair,
4457 4483 // but tends to provide excellent throughput as hot threads remain hot.
4458 4484 // (We wake recently run threads first).
4459 4485
// Release the mux at *Lock. If there are no waiters a single CAS clears
// LOCKBIT. Otherwise pop the head waiter (unfair but throughput-friendly;
// see the comment above), clear its OnList marker, and unpark it.
void Thread::muxRelease (volatile intptr_t * Lock) {
  for (;;) {
    const intptr_t w = Atomic::cmpxchg_ptr (0, Lock, LOCKBIT) ;
    assert (w & LOCKBIT, "invariant") ;
    if (w == LOCKBIT) return ;
    ParkEvent * List = (ParkEvent *) (w & ~LOCKBIT) ;
    assert (List != NULL, "invariant") ;
    assert (List->OnList == intptr_t(Lock), "invariant") ;
    ParkEvent * nxt = List->ListNext ;

    // The following CAS() releases the lock and pops the head element.
    if (Atomic::cmpxchg_ptr (intptr_t(nxt), Lock, w) != w) {
      continue ;
    }
    List->OnList = 0 ;
    OrderAccess::fence() ;
    List->unpark () ;
    return ;
  }
}
4480 4506
4481 4507
// Run per-thread verification on every Java thread and on the VM thread
// (if it exists). Debug-only sanity checking.
void Threads::verify() {
  ALL_JAVA_THREADS(p) {
    p->verify();
  }
  VMThread* thread = VMThread::vm_thread();
  if (thread != NULL) thread->verify();
}
↓ open down ↓ |
1124 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX