287 }
288
// Publish this Thread object into the OS thread-local storage slot
// (ThreadLocalStorage::set_thread), making it retrievable by later
// thread-local lookups on this OS thread.
void Thread::initialize_thread_local_storage() {
  // Note: Make sure this method only calls
  // non-blocking operations. Otherwise, it might not work
  // with the thread-startup/safepoint interaction.

  // During Java thread startup, safepoint code should allow this
  // method to complete because it may need to allocate memory to
  // store information for the new thread.

  // initialize structure dependent on thread local storage
  ThreadLocalStorage::set_thread(this);
}
301
// Capture the current OS thread's stack base and size into this Thread,
// let a JavaThread derive its stack-overflow limit from them, run
// platform-specific thread setup, and (when NMT is compiled in) register
// the stack region with native memory tracking.
void Thread::record_stack_base_and_size() {
  set_stack_base(os::current_stack_base());
  set_stack_size(os::current_stack_size());
  if (is_Java_thread()) {
    // Only JavaThreads maintain a stack-overflow limit.
    ((JavaThread*) this)->set_stack_overflow_limit();
  }
  // CR 7190089: on Solaris, primordial thread's stack is adjusted
  // in initialize_thread(). Without the adjustment, stack size is
  // incorrect if stack is set to unlimited (ulimit -s unlimited).
  // So far, only Solaris has real implementation of initialize_thread().
  //
  // set up any platform-specific state.
  os::initialize_thread(this);

#if INCLUDE_NMT
  // record thread's native stack, stack grows downward
  address stack_low_addr = stack_base() - stack_size();
  MemTracker::record_thread_stack(stack_low_addr, stack_size());
#endif // INCLUDE_NMT
}
322
323
324 Thread::~Thread() {
325 // Reclaim the objectmonitors from the omFreeList of the moribund thread.
326 ObjectSynchronizer::omFlush(this);
891 if (GCALotAtAllSafepoints) {
892 // We could enter a safepoint here and thus have a gc
893 InterfaceSupport::check_gc_alot();
894 }
895 #endif
896 }
897 #endif
898
899 bool Thread::is_in_stack(address adr) const {
900 assert(Thread::current() == this, "is_in_stack can only be called from current thread");
901 address end = os::current_stack_pointer();
902 // Allow non Java threads to call this without stack_base
903 if (_stack_base == NULL) return true;
904 if (stack_base() >= adr && adr >= end) return true;
905
906 return false;
907 }
908
909
910 bool Thread::is_in_usable_stack(address adr) const {
911 size_t stack_guard_size = os::uses_stack_guard_pages() ? (StackYellowPages + StackRedPages) * os::vm_page_size() : 0;
912 size_t usable_stack_size = _stack_size - stack_guard_size;
913
914 return ((adr < stack_base()) && (adr >= stack_base() - usable_stack_size));
915 }
916
917
918 // We had to move these methods here, because vm threads get into ObjectSynchronizer::enter
919 // However, there is a note in JavaThread::is_lock_owned() about the VM threads not being
920 // used for compilation in the future. If that change is made, the need for these methods
921 // should be revisited, and they should be removed if possible.
922
// A generic Thread is considered to "own" a lock address when that
// address lies on its own stack.
bool Thread::is_lock_owned(address adr) const {
  return on_local_stack(adr);
}
926
// Register this thread with the OS layer as the VM's main thread;
// returns the result of the platform main-thread setup.
bool Thread::set_as_starting_thread() {
  // NOTE: this must be called inside the main thread.
  return os::create_main_thread((JavaThread*)this);
}
931
1447 _privileged_stack_top = NULL;
1448 _array_for_gc = NULL;
1449 _suspend_equivalent = false;
1450 _in_deopt_handler = 0;
1451 _doing_unsafe_access = false;
1452 _stack_guard_state = stack_guard_unused;
1453 #if INCLUDE_JVMCI
1454 _pending_monitorenter = false;
1455 _pending_deoptimization = -1;
1456 _pending_failed_speculation = NULL;
1457 _pending_transfer_to_interpreter = false;
1458 _jvmci._alternate_call_target = NULL;
1459 assert(_jvmci._implicit_exception_pc == NULL, "must be");
1460 if (JVMCICounterSize > 0) {
1461 _jvmci_counters = NEW_C_HEAP_ARRAY(jlong, JVMCICounterSize, mtInternal);
1462 memset(_jvmci_counters, 0, sizeof(jlong) * JVMCICounterSize);
1463 } else {
1464 _jvmci_counters = NULL;
1465 }
1466 #endif // INCLUDE_JVMCI
1467 (void)const_cast<oop&>(_exception_oop = oop(NULL));
1468 _exception_pc = 0;
1469 _exception_handler_pc = 0;
1470 _is_method_handle_return = 0;
1471 _jvmti_thread_state= NULL;
1472 _should_post_on_exceptions_flag = JNI_FALSE;
1473 _jvmti_get_loaded_classes_closure = NULL;
1474 _interp_only_mode = 0;
1475 _special_runtime_exit_condition = _no_async_condition;
1476 _pending_async_exception = NULL;
1477 _thread_stat = NULL;
1478 _thread_stat = new ThreadStatistics();
1479 _blocked_on_compilation = false;
1480 _jni_active_critical = 0;
1481 _pending_jni_exception_check_fn = NULL;
1482 _do_not_unlock_if_synchronized = false;
1483 _cached_monitor_info = NULL;
1484 _parker = Parker::Allocate(this);
1485
1486 #ifndef PRODUCT
1519 DirtyCardQueueSet JavaThread::_dirty_card_queue_set;
1520 #endif // INCLUDE_ALL_GCS
1521
// Construct a JavaThread, recording whether the underlying native thread
// is attaching itself to the VM via JNI (as opposed to being created by
// the VM).
JavaThread::JavaThread(bool is_attaching_via_jni) :
  Thread()
#if INCLUDE_ALL_GCS
  , _satb_mark_queue(&_satb_mark_queue_set),
  _dirty_card_queue(&_dirty_card_queue_set)
#endif // INCLUDE_ALL_GCS
{
  initialize();
  if (is_attaching_via_jni) {
    _jni_attach_state = _attaching_via_jni;
  } else {
    _jni_attach_state = _not_attaching_via_jni;
  }
  assert(deferred_card_mark().is_empty(), "Default MemRegion ctor");
}
1537
// Re-enable the yellow guard zone after a stack overflow has been handled.
// Returns true if the stack is already guarded (or guarding is not needed)
// or was successfully re-guarded; returns false when the register stack
// (on architectures that have one) has overflowed.
bool JavaThread::reguard_stack(address cur_sp) {
  if (_stack_guard_state != stack_guard_yellow_disabled) {
    return true; // Stack already guarded or guard pages not needed.
  }

  if (register_stack_overflow()) {
    // For those architectures which have separate register and
    // memory stacks, we must check the register stack to see if
    // it has overflowed.
    return false;
  }

  // Java code never executes within the yellow zone: the latter is only
  // there to provoke an exception during stack banging. If java code
  // is executing there, either StackShadowPages should be larger, or
  // some exception code in c1, c2 or the interpreter isn't unwinding
  // when it should.
  guarantee(cur_sp > stack_yellow_zone_base(), "not enough space to reguard - increase StackShadowPages");

  enable_stack_yellow_zone();
  return true;
}
1560
// Convenience overload: reguard relative to the current stack pointer.
bool JavaThread::reguard_stack(void) {
  return reguard_stack(os::current_stack_pointer());
}
1564
1565
// If the VM has already exited, park this thread forever by blocking on
// Threads_lock (which is never released after VM exit). Control must not
// return past the lock acquisition.
void JavaThread::block_if_vm_exited() {
  if (_terminated == _vm_exited) {
    // _vm_exited is set at safepoint, and Threads_lock is never released
    // we will block here forever
    Threads_lock->lock_without_safepoint_check();
    ShouldNotReachHere();
  }
}
1574
1575
1576 // Remove this ifdef when C1 is ported to the compiler interface.
1577 static void compiler_thread_entry(JavaThread* thread, TRAPS);
2467
2468 // Sanity check: thread is gone, has started exiting or the thread
2469 // was not externally suspended.
2470 if (!Threads::includes(this) || is_exiting() || !is_external_suspend()) {
2471 return;
2472 }
2473
2474 MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
2475
2476 clear_external_suspend();
2477
2478 if (is_ext_suspended()) {
2479 clear_ext_suspended();
2480 SR_lock()->notify_all();
2481 }
2482 }
2483
// Create and protect the stack guard region (yellow + red pages) at the
// low end of this thread's stack. No-op when the platform does not use
// guard pages or when guard pages are already set up. On failure the
// guard state is left as stack_guard_unused and a warning is emitted.
void JavaThread::create_stack_guard_pages() {
  if (! os::uses_stack_guard_pages() || _stack_guard_state != stack_guard_unused) return;
  // Stack grows downward: the guard region starts at the lowest address.
  address low_addr = stack_base() - stack_size();
  size_t len = (StackYellowPages + StackRedPages) * os::vm_page_size();

  // Some platforms require the guard pages to be explicitly mapped first.
  int allocate = os::allocate_stack_guard_pages();
  // warning("Guarding at " PTR_FORMAT " for len " SIZE_FORMAT "\n", low_addr, len);

  if (allocate && !os::create_stack_guard_pages((char *) low_addr, len)) {
    warning("Attempt to allocate stack guard pages failed.");
    return;
  }

  if (os::guard_memory((char *) low_addr, len)) {
    _stack_guard_state = stack_guard_enabled;
  } else {
    warning("Attempt to protect stack guard pages failed.");
    // Roll back the mapping so the pages are not left allocated-but-unguarded.
    if (os::uncommit_memory((char *) low_addr, len)) {
      warning("Attempt to deallocate stack guard pages failed.");
    }
  }
}
2506
2507 void JavaThread::remove_stack_guard_pages() {
2508 assert(Thread::current() == this, "from different thread");
2509 if (_stack_guard_state == stack_guard_unused) return;
2510 address low_addr = stack_base() - stack_size();
2511 size_t len = (StackYellowPages + StackRedPages) * os::vm_page_size();
2512
2513 if (os::allocate_stack_guard_pages()) {
2514 if (os::remove_stack_guard_pages((char *) low_addr, len)) {
2515 _stack_guard_state = stack_guard_unused;
2516 } else {
2517 warning("Attempt to deallocate stack guard pages failed.");
2518 }
2519 } else {
2520 if (_stack_guard_state == stack_guard_unused) return;
2521 if (os::unguard_memory((char *) low_addr, len)) {
2522 _stack_guard_state = stack_guard_unused;
2523 } else {
2524 warning("Attempt to unprotect stack guard pages failed.");
2525 }
2526 }
2527 }
2528
// Re-protect the yellow guard zone of this thread's stack after it was
// disabled (e.g. while handling a stack overflow). Requires that guard
// pages are in use and the zone is not already enabled.
void JavaThread::enable_stack_yellow_zone() {
  assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
  assert(_stack_guard_state != stack_guard_enabled, "already enabled");

  // The base notation is from the stacks point of view, growing downward.
  // We need to adjust it to work correctly with guard_memory()
  address base = stack_yellow_zone_base() - stack_yellow_zone_size();

  // Sanity: the zone must lie below both the stack base and the live SP.
  guarantee(base < stack_base(), "Error calculating stack yellow zone");
  guarantee(base < os::current_stack_pointer(), "Error calculating stack yellow zone");

  if (os::guard_memory((char *) base, stack_yellow_zone_size())) {
    _stack_guard_state = stack_guard_enabled;
  } else {
    warning("Attempt to guard stack yellow zone failed.");
  }
  // Also re-arm the register-stack guard on architectures that have one.
  enable_register_stack_guard();
}
2547
2548 void JavaThread::disable_stack_yellow_zone() {
|
287 }
288
// Publish this Thread object into the OS thread-local storage slot
// (ThreadLocalStorage::set_thread), making it retrievable by later
// thread-local lookups on this OS thread.
void Thread::initialize_thread_local_storage() {
  // Note: Make sure this method only calls
  // non-blocking operations. Otherwise, it might not work
  // with the thread-startup/safepoint interaction.

  // During Java thread startup, safepoint code should allow this
  // method to complete because it may need to allocate memory to
  // store information for the new thread.

  // initialize structure dependent on thread local storage
  ThreadLocalStorage::set_thread(this);
}
301
// Capture the current OS thread's stack base and size into this Thread,
// let a JavaThread derive its stack-overflow limit and initial reserved-
// stack activation from them, run platform-specific thread setup, and
// (when NMT is compiled in) register the stack region with native memory
// tracking.
void Thread::record_stack_base_and_size() {
  set_stack_base(os::current_stack_base());
  set_stack_size(os::current_stack_size());
  if (is_Java_thread()) {
    // Only JavaThreads maintain overflow limits; the reserved-stack
    // activation starts at the stack base (nothing reserved yet).
    ((JavaThread*) this)->set_stack_overflow_limit();
    ((JavaThread*) this)->set_reserved_stack_activation((intptr_t*)stack_base());
  }
  // CR 7190089: on Solaris, primordial thread's stack is adjusted
  // in initialize_thread(). Without the adjustment, stack size is
  // incorrect if stack is set to unlimited (ulimit -s unlimited).
  // So far, only Solaris has real implementation of initialize_thread().
  //
  // set up any platform-specific state.
  os::initialize_thread(this);

#if INCLUDE_NMT
  // record thread's native stack, stack grows downward
  address stack_low_addr = stack_base() - stack_size();
  MemTracker::record_thread_stack(stack_low_addr, stack_size());
#endif // INCLUDE_NMT
}
323
324
325 Thread::~Thread() {
326 // Reclaim the objectmonitors from the omFreeList of the moribund thread.
327 ObjectSynchronizer::omFlush(this);
892 if (GCALotAtAllSafepoints) {
893 // We could enter a safepoint here and thus have a gc
894 InterfaceSupport::check_gc_alot();
895 }
896 #endif
897 }
898 #endif
899
900 bool Thread::is_in_stack(address adr) const {
901 assert(Thread::current() == this, "is_in_stack can only be called from current thread");
902 address end = os::current_stack_pointer();
903 // Allow non Java threads to call this without stack_base
904 if (_stack_base == NULL) return true;
905 if (stack_base() >= adr && adr >= end) return true;
906
907 return false;
908 }
909
910
911 bool Thread::is_in_usable_stack(address adr) const {
912 size_t stack_guard_size = os::uses_stack_guard_pages() ? (StackReservedPages + StackYellowPages + StackRedPages) * os::vm_page_size() : 0;
913 size_t usable_stack_size = _stack_size - stack_guard_size;
914
915 return ((adr < stack_base()) && (adr >= stack_base() - usable_stack_size));
916 }
917
918
919 // We had to move these methods here, because vm threads get into ObjectSynchronizer::enter
920 // However, there is a note in JavaThread::is_lock_owned() about the VM threads not being
921 // used for compilation in the future. If that change is made, the need for these methods
922 // should be revisited, and they should be removed if possible.
923
// A generic Thread is considered to "own" a lock address when that
// address lies on its own stack.
bool Thread::is_lock_owned(address adr) const {
  return on_local_stack(adr);
}
927
// Register this thread with the OS layer as the VM's main thread;
// returns the result of the platform main-thread setup.
bool Thread::set_as_starting_thread() {
  // NOTE: this must be called inside the main thread.
  return os::create_main_thread((JavaThread*)this);
}
932
1448 _privileged_stack_top = NULL;
1449 _array_for_gc = NULL;
1450 _suspend_equivalent = false;
1451 _in_deopt_handler = 0;
1452 _doing_unsafe_access = false;
1453 _stack_guard_state = stack_guard_unused;
1454 #if INCLUDE_JVMCI
1455 _pending_monitorenter = false;
1456 _pending_deoptimization = -1;
1457 _pending_failed_speculation = NULL;
1458 _pending_transfer_to_interpreter = false;
1459 _jvmci._alternate_call_target = NULL;
1460 assert(_jvmci._implicit_exception_pc == NULL, "must be");
1461 if (JVMCICounterSize > 0) {
1462 _jvmci_counters = NEW_C_HEAP_ARRAY(jlong, JVMCICounterSize, mtInternal);
1463 memset(_jvmci_counters, 0, sizeof(jlong) * JVMCICounterSize);
1464 } else {
1465 _jvmci_counters = NULL;
1466 }
1467 #endif // INCLUDE_JVMCI
1468 _reserved_stack_activation = NULL; // stack base not known yet
1469 (void)const_cast<oop&>(_exception_oop = oop(NULL));
1470 _exception_pc = 0;
1471 _exception_handler_pc = 0;
1472 _is_method_handle_return = 0;
1473 _jvmti_thread_state= NULL;
1474 _should_post_on_exceptions_flag = JNI_FALSE;
1475 _jvmti_get_loaded_classes_closure = NULL;
1476 _interp_only_mode = 0;
1477 _special_runtime_exit_condition = _no_async_condition;
1478 _pending_async_exception = NULL;
1479 _thread_stat = NULL;
1480 _thread_stat = new ThreadStatistics();
1481 _blocked_on_compilation = false;
1482 _jni_active_critical = 0;
1483 _pending_jni_exception_check_fn = NULL;
1484 _do_not_unlock_if_synchronized = false;
1485 _cached_monitor_info = NULL;
1486 _parker = Parker::Allocate(this);
1487
1488 #ifndef PRODUCT
1521 DirtyCardQueueSet JavaThread::_dirty_card_queue_set;
1522 #endif // INCLUDE_ALL_GCS
1523
// Construct a JavaThread, recording whether the underlying native thread
// is attaching itself to the VM via JNI (as opposed to being created by
// the VM).
JavaThread::JavaThread(bool is_attaching_via_jni) :
  Thread()
#if INCLUDE_ALL_GCS
  , _satb_mark_queue(&_satb_mark_queue_set),
  _dirty_card_queue(&_dirty_card_queue_set)
#endif // INCLUDE_ALL_GCS
{
  initialize();
  if (is_attaching_via_jni) {
    _jni_attach_state = _attaching_via_jni;
  } else {
    _jni_attach_state = _not_attaching_via_jni;
  }
  assert(deferred_card_mark().is_empty(), "Default MemRegion ctor");
}
1539
// Re-enable whichever guard zone (yellow or reserved) is currently
// disabled after an overflow/reserved-area event has been handled.
// Returns true if the stack is already guarded (or guarding is not
// needed) or was successfully re-guarded; returns false when the
// register stack (on architectures that have one) has overflowed.
bool JavaThread::reguard_stack(address cur_sp) {
  if (_stack_guard_state != stack_guard_yellow_disabled
      && _stack_guard_state != stack_guard_reserved_disabled) {
    return true; // Stack already guarded or guard pages not needed.
  }

  if (register_stack_overflow()) {
    // For those architectures which have separate register and
    // memory stacks, we must check the register stack to see if
    // it has overflowed.
    return false;
  }

  // Java code never executes within the yellow zone: the latter is only
  // there to provoke an exception during stack banging. If java code
  // is executing there, either StackShadowPages should be larger, or
  // some exception code in c1, c2 or the interpreter isn't unwinding
  // when it should.
  guarantee(cur_sp > stack_yellow_zone_base(), "not enough space to reguard - increase StackShadowPages");
  if (_stack_guard_state == stack_guard_yellow_disabled) {
    enable_stack_yellow_zone();
    // Reset the reserved-stack activation back to the stack base if a
    // reserved-area activation was pending.
    if (reserved_stack_activation() != (intptr_t*)stack_base()) {
      set_reserved_stack_activation((intptr_t*)stack_base());
    }
  } else if (_stack_guard_state == stack_guard_reserved_disabled) {
    set_reserved_stack_activation((intptr_t*)stack_base());
    enable_stack_reserved_zone();
  }
  return true;
}
1570
// Convenience overload: reguard relative to the current stack pointer.
bool JavaThread::reguard_stack(void) {
  return reguard_stack(os::current_stack_pointer());
}
1574
1575
// If the VM has already exited, park this thread forever by blocking on
// Threads_lock (which is never released after VM exit). Control must not
// return past the lock acquisition.
void JavaThread::block_if_vm_exited() {
  if (_terminated == _vm_exited) {
    // _vm_exited is set at safepoint, and Threads_lock is never released
    // we will block here forever
    Threads_lock->lock_without_safepoint_check();
    ShouldNotReachHere();
  }
}
1584
1585
1586 // Remove this ifdef when C1 is ported to the compiler interface.
1587 static void compiler_thread_entry(JavaThread* thread, TRAPS);
2477
2478 // Sanity check: thread is gone, has started exiting or the thread
2479 // was not externally suspended.
2480 if (!Threads::includes(this) || is_exiting() || !is_external_suspend()) {
2481 return;
2482 }
2483
2484 MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
2485
2486 clear_external_suspend();
2487
2488 if (is_ext_suspended()) {
2489 clear_ext_suspended();
2490 SR_lock()->notify_all();
2491 }
2492 }
2493
// Create and protect the stack guard region (reserved + yellow + red
// pages) at the low end of this thread's stack. No-op when the platform
// does not use guard pages or when guard pages are already set up. On
// failure the guard state is left as stack_guard_unused and a warning is
// emitted.
void JavaThread::create_stack_guard_pages() {
  if (! os::uses_stack_guard_pages() || _stack_guard_state != stack_guard_unused) return;
  // Stack grows downward: the guard region starts at the lowest address.
  address low_addr = stack_base() - stack_size();
  size_t len = (StackReservedPages + StackYellowPages + StackRedPages) * os::vm_page_size();

  // Some platforms require the guard pages to be explicitly mapped first.
  int allocate = os::allocate_stack_guard_pages();
  // warning("Guarding at " PTR_FORMAT " for len " SIZE_FORMAT "\n", low_addr, len);

  if (allocate && !os::create_stack_guard_pages((char *) low_addr, len)) {
    warning("Attempt to allocate stack guard pages failed.");
    return;
  }

  if (os::guard_memory((char *) low_addr, len)) {
    _stack_guard_state = stack_guard_enabled;
  } else {
    warning("Attempt to protect stack guard pages failed.");
    // Roll back the mapping so the pages are not left allocated-but-unguarded.
    if (os::uncommit_memory((char *) low_addr, len)) {
      warning("Attempt to deallocate stack guard pages failed.");
    }
  }
}
2516
2517 void JavaThread::remove_stack_guard_pages() {
2518 assert(Thread::current() == this, "from different thread");
2519 if (_stack_guard_state == stack_guard_unused) return;
2520 address low_addr = stack_base() - stack_size();
2521 size_t len = (StackReservedPages + StackYellowPages + StackRedPages) * os::vm_page_size();
2522
2523 if (os::allocate_stack_guard_pages()) {
2524 if (os::remove_stack_guard_pages((char *) low_addr, len)) {
2525 _stack_guard_state = stack_guard_unused;
2526 } else {
2527 warning("Attempt to deallocate stack guard pages failed.");
2528 }
2529 } else {
2530 if (_stack_guard_state == stack_guard_unused) return;
2531 if (os::unguard_memory((char *) low_addr, len)) {
2532 _stack_guard_state = stack_guard_unused;
2533 } else {
2534 warning("Attempt to unprotect stack guard pages failed.");
2535 }
2536 }
2537 }
2538
// Re-protect the reserved guard zone of this thread's stack after it was
// disabled to let a critical section complete. Requires that guard pages
// are in use and the zone is not already enabled.
void JavaThread::enable_stack_reserved_zone() {
  assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
  assert(_stack_guard_state != stack_guard_enabled, "already enabled");

  // The base notation is from the stacks point of view, growing downward.
  // We need to adjust it to work correctly with guard_memory()
  address base = stack_reserved_zone_base() - stack_reserved_zone_size();

  // Sanity: the zone must lie below both the stack base and the live SP.
  guarantee(base < stack_base(),"Error calculating stack reserved zone");
  guarantee(base < os::current_stack_pointer(),"Error calculating stack reserved zone");

  if (os::guard_memory((char *) base, stack_reserved_zone_size())) {
    _stack_guard_state = stack_guard_enabled;
  } else {
    warning("Attempt to guard stack reserved zone failed.");
  }
  // Also re-arm the register-stack guard on architectures that have one.
  enable_register_stack_guard();
}
2557
// Unprotect the reserved guard zone so a thread executing an annotated
// critical section can keep running into it, and mark the state
// stack_guard_reserved_disabled so reguard_stack() can restore it later.
void JavaThread::disable_stack_reserved_zone() {
  assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
  assert(_stack_guard_state != stack_guard_reserved_disabled, "already disabled");

  // Simply return if called for a thread that does not use guard pages.
  // (The asserts above catch this in debug builds; this guard covers
  // product builds.)
  if (_stack_guard_state == stack_guard_unused) return;

  // The base notation is from the stacks point of view, growing downward.
  // We need to adjust it to work correctly with guard_memory()
  address base = stack_reserved_zone_base() - stack_reserved_zone_size();

  if (os::unguard_memory((char *)base, stack_reserved_zone_size())) {
    _stack_guard_state = stack_guard_reserved_disabled;
  } else {
    warning("Attempt to unguard stack reserved zone failed.");
  }
  // Also disarm the register-stack guard on architectures that have one.
  disable_register_stack_guard();
}
2576
// Re-protect the yellow guard zone of this thread's stack after it was
// disabled (e.g. while handling a stack overflow). Requires that guard
// pages are in use and the zone is not already enabled.
void JavaThread::enable_stack_yellow_zone() {
  assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
  assert(_stack_guard_state != stack_guard_enabled, "already enabled");

  // The base notation is from the stacks point of view, growing downward.
  // We need to adjust it to work correctly with guard_memory()
  address base = stack_yellow_zone_base() - stack_yellow_zone_size();

  // Sanity: the zone must lie below both the stack base and the live SP.
  guarantee(base < stack_base(), "Error calculating stack yellow zone");
  guarantee(base < os::current_stack_pointer(), "Error calculating stack yellow zone");

  if (os::guard_memory((char *) base, stack_yellow_zone_size())) {
    _stack_guard_state = stack_guard_enabled;
  } else {
    warning("Attempt to guard stack yellow zone failed.");
  }
  // Also re-arm the register-stack guard on architectures that have one.
  enable_register_stack_guard();
}
2595
2596 void JavaThread::disable_stack_yellow_zone() {
|