247 // mutex, or blocking on an object synchronizer (Java locking).
248 // If !allow_safepoint(), then an assertion failure will happen in any of the above cases
249 // If !allow_allocation(), then an assertion failure will happen during allocation
250 // (Hence, !allow_safepoint() => !allow_allocation()).
251 //
252 // The two classes No_Safepoint_Verifier and No_Allocation_Verifier are used to set these counters.
253 //
  // Debug-only permission counters: a zero count means the corresponding
  // operation is permitted on this thread (see class comment above).
  NOT_PRODUCT(int _allow_safepoint_count;) // If 0, the thread is allowed to reach a safepoint
  debug_only(int _allow_allocation_count;) // If 0, the thread is allowed to allocate oops.

  // Used by SkipGCALot class.
  NOT_PRODUCT(bool _skip_gcalot;) // Should we elide gc-a-lot?

  // These friends manipulate the permission counters above directly.
  friend class No_Alloc_Verifier;
  friend class No_Safepoint_Verifier;
  friend class Pause_No_Safepoint_Verifier;
  friend class ThreadLocalStorage;
  friend class GC_locker;
265
  ThreadLocalAllocBuffer _tlab; // Thread-local eden
  jlong _allocated_bytes; // Cumulative number of bytes allocated on
                          // the Java heap

  TRACE_DATA _trace_data; // Thread-local data for tracing

  ThreadExt _ext; // Extension point for additional thread-local state

  int _vm_operation_started_count; // VM_Operation support
  int _vm_operation_completed_count; // VM_Operation support

  ObjectMonitor* _current_pending_monitor; // ObjectMonitor this thread
                                           // is waiting to lock
  bool _current_pending_monitor_is_from_java; // locking is from Java code

  // ObjectMonitor on which this thread called Object.wait()
  ObjectMonitor* _current_waiting_monitor;

  // Private thread-local objectmonitor list - a simple cache organized as a SLL.
 public:
  ObjectMonitor* omFreeList;
  int omFreeCount; // length of omFreeList
  int omFreeProvision; // reload chunk size
  ObjectMonitor* omInUseList; // SLL to track monitors in circulation
  int omInUseCount; // length of omInUseList
291
#ifdef ASSERT
 private:
  // Debug-only marker; presumably used by GC_locker (a friend of Thread) to
  // ensure each thread is counted at most once in the JNI critical-region
  // count — confirm against gcLocker code.
  bool _visited_for_critical_count;

 public:
  void set_visited_for_critical_count(bool z) { _visited_for_critical_count = z; }
  bool was_visited_for_critical_count() const { return _visited_for_critical_count; }
#endif
300
301 public:
302 enum {
303 is_definitely_current_thread = true
  // Accessors for the underlying OS-level thread object.
  OSThread* osthread() const { return _osthread; }
  void set_osthread(OSThread* thread) { _osthread = thread; }

  // JNI handle support
  JNIHandleBlock* active_handles() const { return _active_handles; }
  void set_active_handles(JNIHandleBlock* block) { _active_handles = block; }
  JNIHandleBlock* free_handle_block() const { return _free_handle_block; }
  void set_free_handle_block(JNIHandleBlock* block) { _free_handle_block = block; }

  // Internal handle support
  HandleArea* handle_area() const { return _handle_area; }
  void set_handle_area(HandleArea* area) { _handle_area = area; }

  // Handles to Metadata*, analogous to oop handles.
  GrowableArray<Metadata*>* metadata_handles() const { return _metadata_handles; }
  void set_metadata_handles(GrowableArray<Metadata*>* handles){ _metadata_handles = handles; }
422
423 // Thread-Local Allocation Buffer (TLAB) support
424 ThreadLocalAllocBuffer& tlab() { return _tlab; }
425 void initialize_tlab() {
426 if (UseTLAB) {
427 tlab().initialize();
428 }
429 }
430
  // Cumulative Java-heap allocation statistics for this thread.
  jlong allocated_bytes() { return _allocated_bytes; }
  void set_allocated_bytes(jlong value) { _allocated_bytes = value; }
  void incr_allocated_bytes(jlong size) { _allocated_bytes += size; }
  // Defined out-of-line; presumably folds in the not-yet-retired TLAB — confirm there.
  inline jlong cooked_allocated_bytes();

  TRACE_DATA* trace_data() { return &_trace_data; }

  const ThreadExt& ext() const { return _ext; }
  ThreadExt& ext() { return _ext; }

  // VM operation support
  int vm_operation_ticket() { return ++_vm_operation_started_count; }
  int vm_operation_completed_count() { return _vm_operation_completed_count; }
  void increment_vm_operation_completed_count() { _vm_operation_completed_count++; }

  // For tracking the heavyweight monitor the thread is pending on.
  ObjectMonitor* current_pending_monitor() {
    return _current_pending_monitor;
  }
  void set_current_pending_monitor(ObjectMonitor* monitor) {
    _current_pending_monitor = monitor;
  }
  void set_current_pending_monitor_is_from_java(bool from_java) {
    _current_pending_monitor_is_from_java = from_java;
  }
  bool current_pending_monitor_is_from_java() {
    return _current_pending_monitor_is_from_java;
  }

  // For tracking the ObjectMonitor on which this thread called Object.wait()
  ObjectMonitor* current_waiting_monitor() {
    return _current_waiting_monitor;
  }
  void set_current_waiting_monitor(ObjectMonitor* monitor) {
    _current_waiting_monitor = monitor;
  }
467
  // GC support
  // Apply "f->do_oop" to all root oops in "this".
  // Apply "cld_f->do_cld" to CLDs that are otherwise not kept alive.
  // Used by JavaThread::oops_do.
  // Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames
  virtual void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);

  // Handles the parallel case for the method below.
 private:
  bool claim_oops_do_par_case(int collection_parity);
 public:
  // Requires that "collection_parity" is that of the current roots
  // iteration. If "is_par" is false, sets the parity of "this" to
  // "collection_parity", and returns "true". If "is_par" is true,
  // uses an atomic instruction to set the current thread's parity to
  // "collection_parity", if it is not already. Returns "true" iff the
  // calling thread does the update, this indicates that the calling thread
  // has claimed the thread's stack as a root group in the current
  // collection.
487 bool claim_oops_do(bool is_par, int collection_parity) {
  // Byte offsets of Thread fields, for use by generated (assembler) code.
  static ByteSize active_handles_offset() { return byte_offset_of(Thread, _active_handles); }

  static ByteSize stack_base_offset() { return byte_offset_of(Thread, _stack_base); }
  static ByteSize stack_size_offset() { return byte_offset_of(Thread, _stack_size); }

  // Emits tlab_<name>_offset() accessors by combining the offset of _tlab
  // with the field offset inside ThreadLocalAllocBuffer.
#define TLAB_FIELD_OFFSET(name) \
  static ByteSize tlab_##name##_offset() { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::name##_offset(); }

  TLAB_FIELD_OFFSET(start)
  TLAB_FIELD_OFFSET(end)
  TLAB_FIELD_OFFSET(top)
  TLAB_FIELD_OFFSET(pf_top)
  TLAB_FIELD_OFFSET(size) // desired_size
  TLAB_FIELD_OFFSET(refill_waste_limit)
  TLAB_FIELD_OFFSET(number_of_refills)
  TLAB_FIELD_OFFSET(fast_refill_waste)
  TLAB_FIELD_OFFSET(slow_allocations)

#undef TLAB_FIELD_OFFSET

  static ByteSize allocated_bytes_offset() { return byte_offset_of(Thread, _allocated_bytes); }
618
 public:
  // Low-level synchronization/parking state, accessed directly by the
  // ObjectMonitor/Mutex implementations (hence public).
  volatile intptr_t _Stalled;
  volatile int _TypeTag;
  ParkEvent * _ParkEvent; // for synchronized()
  ParkEvent * _SleepEvent; // for Thread.sleep
  ParkEvent * _MutexEvent; // for native internal Mutex/Monitor
  ParkEvent * _MuxEvent; // for low-level muxAcquire-muxRelease
  int NativeSyncRecursion; // diagnostic

  volatile int _OnTrap; // Resume-at IP delta
  jint _hashStateW; // Marsaglia Shift-XOR thread-local RNG
  jint _hashStateX; // thread-specific hashCode generator state
  jint _hashStateY;
  jint _hashStateZ;
  void * _schedctl;


  volatile jint rng[4]; // RNG for spin loop
939 // We use intptr_t instead of address so debugger doesn't try and display strings
940 intptr_t _target;
941 intptr_t _instruction;
942 const char* _file;
943 int _line;
944 } _jmp_ring[jump_ring_buffer_size];
945 #endif // PRODUCT
946
#if INCLUDE_ALL_GCS
  // Support for G1 barriers

  ObjPtrQueue _satb_mark_queue; // Thread-local log for SATB barrier.
  // Set of all such queues.
  static SATBMarkQueueSet _satb_mark_queue_set;

  DirtyCardQueue _dirty_card_queue; // Thread-local log for dirty cards.
  // Set of all such queues.
  static DirtyCardQueueSet _dirty_card_queue_set;

  // Flushes both thread-local queues above (defined in the .cpp file).
  void flush_barrier_queues();
#endif // INCLUDE_ALL_GCS
960
  friend class VMThread;
  friend class ThreadWaitTransition;
  friend class VM_Exit;

  void initialize(); // Initializes the instance variables

 public:
  // Constructor
  JavaThread(bool is_attaching_via_jni = false); // for main thread and JNI attached threads
  JavaThread(ThreadFunction entry_point, size_t stack_size = 0);
  ~JavaThread();

#ifdef ASSERT
  // verify this JavaThread hasn't been published in the Threads::list yet
  void verify_not_published();
#endif
977
// JNI function table getter/setter for the JVMTI JNI function table interception API.
  // Byte offsets of JavaThread fields, for use by generated (assembler) code.
  static ByteSize vm_result_2_offset() { return byte_offset_of(JavaThread, _vm_result_2); }
  static ByteSize thread_state_offset() { return byte_offset_of(JavaThread, _thread_state); }
  static ByteSize saved_exception_pc_offset() { return byte_offset_of(JavaThread, _saved_exception_pc); }
  static ByteSize osthread_offset() { return byte_offset_of(JavaThread, _osthread); }
  static ByteSize exception_oop_offset() { return byte_offset_of(JavaThread, _exception_oop); }
  static ByteSize exception_pc_offset() { return byte_offset_of(JavaThread, _exception_pc); }
  static ByteSize exception_handler_pc_offset() { return byte_offset_of(JavaThread, _exception_handler_pc); }
  static ByteSize stack_overflow_limit_offset() { return byte_offset_of(JavaThread, _stack_overflow_limit); }
  static ByteSize is_method_handle_return_offset() { return byte_offset_of(JavaThread, _is_method_handle_return); }
  static ByteSize stack_guard_state_offset() { return byte_offset_of(JavaThread, _stack_guard_state); }
  static ByteSize suspend_flags_offset() { return byte_offset_of(JavaThread, _suspend_flags); }

  static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); }
  static ByteSize should_post_on_exceptions_flag_offset() {
    return byte_offset_of(JavaThread, _should_post_on_exceptions_flag);
  }

#if INCLUDE_ALL_GCS
  static ByteSize satb_mark_queue_offset() { return byte_offset_of(JavaThread, _satb_mark_queue); }
  static ByteSize dirty_card_queue_offset() { return byte_offset_of(JavaThread, _dirty_card_queue); }
#endif // INCLUDE_ALL_GCS

  // Returns the jni environment for this thread
  JNIEnv* jni_environment() { return &_jni_environment; }

  // Recovers the owning JavaThread from a JNIEnv* by subtracting the offset
  // of the embedded _jni_environment field. Assumes env is the address of a
  // JavaThread's _jni_environment member.
  static JavaThread* thread_from_jni_environment(JNIEnv* env) {
    JavaThread *thread_from_jni_env = (JavaThread*)((intptr_t)env - in_bytes(jni_environment_offset()));
    // Only return NULL if thread is off the thread list; starting to
    // exit should not return NULL.
    if (thread_from_jni_env->is_terminated()) {
      thread_from_jni_env->block_if_vm_exited();
      return NULL;
    } else {
      return thread_from_jni_env;
    }
  }

  // JNI critical regions. These can nest.
  bool in_critical() { return _jni_active_critical > 0; }
  bool in_last_critical() { return _jni_active_critical == 1; }
 public:
  // Stack size used when creating new Java threads; presumably driven by
  // the -Xss option — confirm in the .cpp file.
  static inline size_t stack_size_at_create(void) {
    return _stack_size_at_create;
  }
  static inline void set_stack_size_at_create(size_t value) {
    _stack_size_at_create = value;
  }

#if INCLUDE_ALL_GCS
  // SATB marking queue support
  ObjPtrQueue& satb_mark_queue() { return _satb_mark_queue; }
  static SATBMarkQueueSet& satb_mark_queue_set() {
    return _satb_mark_queue_set;
  }

  // Dirty card queue support
  DirtyCardQueue& dirty_card_queue() { return _dirty_card_queue; }
  static DirtyCardQueueSet& dirty_card_queue_set() {
    return _dirty_card_queue_set;
  }
#endif // INCLUDE_ALL_GCS
1675
1676 // This method initializes the SATB and dirty card queues before a
1677 // JavaThread is added to the Java thread list. Right now, we don't
1678 // have to do anything to the dirty card queue (it should have been
1679 // activated when the thread was created), but we have to activate
1680 // the SATB queue if the thread is created while a marking cycle is
1681 // in progress. The activation / de-activation of the SATB queues at
1682 // the beginning / end of a marking cycle is done during safepoints
1683 // so we have to make sure this method is called outside one to be
1684 // able to safely read the active field of the SATB queue set. Right
1685 // now, it is called just before the thread is added to the Java
1686 // thread list in the Threads::add() method. That method is holding
1687 // the Threads_lock which ensures we are outside a safepoint. We
1688 // cannot do the obvious and set the active field of the SATB queue
1689 // when the thread is created given that, in some cases, safepoints
1690 // might happen between the JavaThread constructor being called and the
1691 // thread being added to the Java thread list (an example of this is
1692 // when the structure for the DestroyJavaVM thread is created).
1693 #if INCLUDE_ALL_GCS
|
247 // mutex, or blocking on an object synchronizer (Java locking).
248 // If !allow_safepoint(), then an assertion failure will happen in any of the above cases
249 // If !allow_allocation(), then an assertion failure will happen during allocation
250 // (Hence, !allow_safepoint() => !allow_allocation()).
251 //
252 // The two classes No_Safepoint_Verifier and No_Allocation_Verifier are used to set these counters.
253 //
  // Debug-only permission counters: a zero count means the corresponding
  // operation is permitted on this thread (see class comment above).
  NOT_PRODUCT(int _allow_safepoint_count;) // If 0, the thread is allowed to reach a safepoint
  debug_only(int _allow_allocation_count;) // If 0, the thread is allowed to allocate oops.

  // Used by SkipGCALot class.
  NOT_PRODUCT(bool _skip_gcalot;) // Should we elide gc-a-lot?

  // These friends manipulate the permission counters above directly.
  friend class No_Alloc_Verifier;
  friend class No_Safepoint_Verifier;
  friend class Pause_No_Safepoint_Verifier;
  friend class ThreadLocalStorage;
  friend class GC_locker;
265
  ThreadLocalAllocBuffer _tlab; // Thread-local eden
  ThreadLocalAllocBuffer _gclab; // Thread-local allocation buffer for GC (e.g. evacuation)
  jlong _allocated_bytes; // Cumulative number of bytes allocated on
                          // the Java heap
  jlong _allocated_bytes_gclab; // Cumulative number of bytes allocated on
                                // the Java heap, in GCLABs

  TRACE_DATA _trace_data; // Thread-local data for tracing

  ThreadExt _ext; // Extension point for additional thread-local state

  int _vm_operation_started_count; // VM_Operation support
  int _vm_operation_completed_count; // VM_Operation support

  ObjectMonitor* _current_pending_monitor; // ObjectMonitor this thread
                                           // is waiting to lock
  bool _current_pending_monitor_is_from_java; // locking is from Java code

  // ObjectMonitor on which this thread called Object.wait()
  ObjectMonitor* _current_waiting_monitor;

  // Accessed via is_evacuating()/set_evacuating(); presumably true while
  // this thread is evacuating objects during GC — confirm with GC code.
  bool _evacuating;

  // Private thread-local objectmonitor list - a simple cache organized as a SLL.
 public:
  ObjectMonitor* omFreeList;
  int omFreeCount; // length of omFreeList
  int omFreeProvision; // reload chunk size
  ObjectMonitor* omInUseList; // SLL to track monitors in circulation
  int omInUseCount; // length of omInUseList
296
#ifdef ASSERT
 private:
  // Debug-only marker; presumably used by GC_locker (a friend of Thread) to
  // ensure each thread is counted at most once in the JNI critical-region
  // count — confirm against gcLocker code.
  bool _visited_for_critical_count;

 public:
  void set_visited_for_critical_count(bool z) { _visited_for_critical_count = z; }
  bool was_visited_for_critical_count() const { return _visited_for_critical_count; }
#endif
305
306 public:
307 enum {
308 is_definitely_current_thread = true
  // Accessors for the underlying OS-level thread object.
  OSThread* osthread() const { return _osthread; }
  void set_osthread(OSThread* thread) { _osthread = thread; }

  // JNI handle support
  JNIHandleBlock* active_handles() const { return _active_handles; }
  void set_active_handles(JNIHandleBlock* block) { _active_handles = block; }
  JNIHandleBlock* free_handle_block() const { return _free_handle_block; }
  void set_free_handle_block(JNIHandleBlock* block) { _free_handle_block = block; }

  // Internal handle support
  HandleArea* handle_area() const { return _handle_area; }
  void set_handle_area(HandleArea* area) { _handle_area = area; }

  // Handles to Metadata*, analogous to oop handles.
  GrowableArray<Metadata*>* metadata_handles() const { return _metadata_handles; }
  void set_metadata_handles(GrowableArray<Metadata*>* handles){ _metadata_handles = handles; }
427
428 // Thread-Local Allocation Buffer (TLAB) support
429 ThreadLocalAllocBuffer& tlab() { return _tlab; }
430 void initialize_tlab() {
431 if (UseTLAB) {
432 tlab().initialize(false);
433 gclab().initialize(true);
434 }
435 }
436
437 // Thread-Local GC Allocation Buffer (GCLAB) support
438 ThreadLocalAllocBuffer& gclab() { return _gclab; }
439
  // Cumulative Java-heap allocation statistics for this thread.
  jlong allocated_bytes() { return _allocated_bytes; }
  void set_allocated_bytes(jlong value) { _allocated_bytes = value; }
  void incr_allocated_bytes(jlong size) { _allocated_bytes += size; }
  // Defined out-of-line; presumably folds in the not-yet-retired TLAB — confirm there.
  inline jlong cooked_allocated_bytes();

  // Same statistics, but for bytes allocated in GCLABs.
  jlong allocated_bytes_gclab() { return _allocated_bytes_gclab; }
  void set_allocated_bytes_gclab(jlong value) { _allocated_bytes_gclab = value; }
  void incr_allocated_bytes_gclab(jlong size) { _allocated_bytes_gclab += size; }

  TRACE_DATA* trace_data() { return &_trace_data; }

  const ThreadExt& ext() const { return _ext; }
  ThreadExt& ext() { return _ext; }

  // VM operation support
  int vm_operation_ticket() { return ++_vm_operation_started_count; }
  int vm_operation_completed_count() { return _vm_operation_completed_count; }
  void increment_vm_operation_completed_count() { _vm_operation_completed_count++; }

  // For tracking the heavyweight monitor the thread is pending on.
  ObjectMonitor* current_pending_monitor() {
    return _current_pending_monitor;
  }
  void set_current_pending_monitor(ObjectMonitor* monitor) {
    _current_pending_monitor = monitor;
  }
  void set_current_pending_monitor_is_from_java(bool from_java) {
    _current_pending_monitor_is_from_java = from_java;
  }
  bool current_pending_monitor_is_from_java() {
    return _current_pending_monitor_is_from_java;
  }

  // For tracking the ObjectMonitor on which this thread called Object.wait()
  ObjectMonitor* current_waiting_monitor() {
    return _current_waiting_monitor;
  }
  void set_current_waiting_monitor(ObjectMonitor* monitor) {
    _current_waiting_monitor = monitor;
  }

  // Accessors for _evacuating; presumably true while this thread is
  // evacuating objects during GC — confirm with GC code.
  bool is_evacuating() {
    return _evacuating;
  }

  void set_evacuating(bool evacuating) {
    _evacuating = evacuating;
  }
488
  // GC support
  // Apply "f->do_oop" to all root oops in "this".
  // Apply "cld_f->do_cld" to CLDs that are otherwise not kept alive.
  // Used by JavaThread::oops_do.
  // Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames
  virtual void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);

  // Handles the parallel case for the method below.
 private:
  bool claim_oops_do_par_case(int collection_parity);
 public:
  // Requires that "collection_parity" is that of the current roots
  // iteration. If "is_par" is false, sets the parity of "this" to
  // "collection_parity", and returns "true". If "is_par" is true,
  // uses an atomic instruction to set the current thread's parity to
  // "collection_parity", if it is not already. Returns "true" iff the
  // calling thread does the update, this indicates that the calling thread
  // has claimed the thread's stack as a root group in the current
  // collection.
508 bool claim_oops_do(bool is_par, int collection_parity) {
  // Byte offsets of Thread fields, for use by generated (assembler) code.
  static ByteSize active_handles_offset() { return byte_offset_of(Thread, _active_handles); }

  static ByteSize stack_base_offset() { return byte_offset_of(Thread, _stack_base); }
  static ByteSize stack_size_offset() { return byte_offset_of(Thread, _stack_size); }

  // Emits tlab_<name>_offset() accessors by combining the offset of _tlab
  // with the field offset inside ThreadLocalAllocBuffer.
#define TLAB_FIELD_OFFSET(name) \
  static ByteSize tlab_##name##_offset() { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::name##_offset(); }

  TLAB_FIELD_OFFSET(start)
  TLAB_FIELD_OFFSET(end)
  TLAB_FIELD_OFFSET(top)
  TLAB_FIELD_OFFSET(pf_top)
  TLAB_FIELD_OFFSET(size) // desired_size
  TLAB_FIELD_OFFSET(refill_waste_limit)
  TLAB_FIELD_OFFSET(number_of_refills)
  TLAB_FIELD_OFFSET(fast_refill_waste)
  TLAB_FIELD_OFFSET(slow_allocations)

#undef TLAB_FIELD_OFFSET

  // Offset of the GCLAB's start field, for generated code.
  static ByteSize gclab_start_offset() { return byte_offset_of(Thread, _gclab) + ThreadLocalAllocBuffer::start_offset(); }

  static ByteSize allocated_bytes_offset() { return byte_offset_of(Thread, _allocated_bytes); }
641
 public:
  // Low-level synchronization/parking state, accessed directly by the
  // ObjectMonitor/Mutex implementations (hence public).
  volatile intptr_t _Stalled;
  volatile int _TypeTag;
  ParkEvent * _ParkEvent; // for synchronized()
  ParkEvent * _SleepEvent; // for Thread.sleep
  ParkEvent * _MutexEvent; // for native internal Mutex/Monitor
  ParkEvent * _MuxEvent; // for low-level muxAcquire-muxRelease
  int NativeSyncRecursion; // diagnostic

  volatile int _OnTrap; // Resume-at IP delta
  jint _hashStateW; // Marsaglia Shift-XOR thread-local RNG
  jint _hashStateX; // thread-specific hashCode generator state
  jint _hashStateY;
  jint _hashStateZ;
  void * _schedctl;


  volatile jint rng[4]; // RNG for spin loop
962 // We use intptr_t instead of address so debugger doesn't try and display strings
963 intptr_t _target;
964 intptr_t _instruction;
965 const char* _file;
966 int _line;
967 } _jmp_ring[jump_ring_buffer_size];
968 #endif // PRODUCT
969
#if INCLUDE_ALL_GCS
  // Support for G1 barriers

  ObjPtrQueue _satb_mark_queue; // Thread-local log for SATB barrier.
  // Set of all such queues.
  static SATBMarkQueueSet _satb_mark_queue_set;

  DirtyCardQueue _dirty_card_queue; // Thread-local log for dirty cards.
  // Set of all such queues.
  static DirtyCardQueueSet _dirty_card_queue_set;

  // Flushes both thread-local queues above (defined in the .cpp file).
  void flush_barrier_queues();

  // Per-thread evacuation flag plus the global value it mirrors; accessed
  // via evacuation_in_progress()/set_evacuation_in_progress*().
  bool _evacuation_in_progress;
  static bool _evacuation_in_progress_global;

#endif // INCLUDE_ALL_GCS
987
  friend class VMThread;
  friend class ThreadWaitTransition;
  friend class VM_Exit;

  void initialize(); // Initializes the instance variables

 public:
  // Constructor
  JavaThread(bool is_attaching_via_jni = false); // for main thread and JNI attached threads
  JavaThread(ThreadFunction entry_point, size_t stack_size = 0);
  ~JavaThread();

#ifdef ASSERT
  // verify this JavaThread hasn't been published in the Threads::list yet
  void verify_not_published();
#endif
1004
// JNI function table getter/setter for the JVMTI JNI function table interception API.
  // Byte offsets of JavaThread fields, for use by generated (assembler) code.
  static ByteSize vm_result_2_offset() { return byte_offset_of(JavaThread, _vm_result_2); }
  static ByteSize thread_state_offset() { return byte_offset_of(JavaThread, _thread_state); }
  static ByteSize saved_exception_pc_offset() { return byte_offset_of(JavaThread, _saved_exception_pc); }
  static ByteSize osthread_offset() { return byte_offset_of(JavaThread, _osthread); }
  static ByteSize exception_oop_offset() { return byte_offset_of(JavaThread, _exception_oop); }
  static ByteSize exception_pc_offset() { return byte_offset_of(JavaThread, _exception_pc); }
  static ByteSize exception_handler_pc_offset() { return byte_offset_of(JavaThread, _exception_handler_pc); }
  static ByteSize stack_overflow_limit_offset() { return byte_offset_of(JavaThread, _stack_overflow_limit); }
  static ByteSize is_method_handle_return_offset() { return byte_offset_of(JavaThread, _is_method_handle_return); }
  static ByteSize stack_guard_state_offset() { return byte_offset_of(JavaThread, _stack_guard_state); }
  static ByteSize suspend_flags_offset() { return byte_offset_of(JavaThread, _suspend_flags); }

  static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); }
  static ByteSize should_post_on_exceptions_flag_offset() {
    return byte_offset_of(JavaThread, _should_post_on_exceptions_flag);
  }

#if INCLUDE_ALL_GCS
  static ByteSize satb_mark_queue_offset() { return byte_offset_of(JavaThread, _satb_mark_queue); }
  static ByteSize dirty_card_queue_offset() { return byte_offset_of(JavaThread, _dirty_card_queue); }

  // Offset of the per-thread evacuation flag, for generated code.
  static ByteSize evacuation_in_progress_offset() { return byte_offset_of(JavaThread, _evacuation_in_progress); }

#endif // INCLUDE_ALL_GCS
1409
  // Returns the jni environment for this thread
  JNIEnv* jni_environment() { return &_jni_environment; }

  // Recovers the owning JavaThread from a JNIEnv* by subtracting the offset
  // of the embedded _jni_environment field. Assumes env is the address of a
  // JavaThread's _jni_environment member.
  static JavaThread* thread_from_jni_environment(JNIEnv* env) {
    JavaThread *thread_from_jni_env = (JavaThread*)((intptr_t)env - in_bytes(jni_environment_offset()));
    // Only return NULL if thread is off the thread list; starting to
    // exit should not return NULL.
    if (thread_from_jni_env->is_terminated()) {
      thread_from_jni_env->block_if_vm_exited();
      return NULL;
    } else {
      return thread_from_jni_env;
    }
  }

  // JNI critical regions. These can nest.
  bool in_critical() { return _jni_active_critical > 0; }
  bool in_last_critical() { return _jni_active_critical == 1; }
 public:
  // Stack size used when creating new Java threads; presumably driven by
  // the -Xss option — confirm in the .cpp file.
  static inline size_t stack_size_at_create(void) {
    return _stack_size_at_create;
  }
  static inline void set_stack_size_at_create(size_t value) {
    _stack_size_at_create = value;
  }

#if INCLUDE_ALL_GCS
  // SATB marking queue support
  ObjPtrQueue& satb_mark_queue() { return _satb_mark_queue; }
  static SATBMarkQueueSet& satb_mark_queue_set() {
    return _satb_mark_queue_set;
  }

  // Dirty card queue support
  DirtyCardQueue& dirty_card_queue() { return _dirty_card_queue; }
  static DirtyCardQueueSet& dirty_card_queue_set() {
    return _dirty_card_queue_set;
  }

  // Per-thread evacuation flag accessors (definitions in the .cpp file).
  bool evacuation_in_progress() const;

  void set_evacuation_in_progress(bool in_prog);

  // Sets the flag on all Java threads; presumably called at a safepoint —
  // confirm in the .cpp file.
  static void set_evacuation_in_progress_all_threads(bool in_prog);
#endif // INCLUDE_ALL_GCS
1711
1712 // This method initializes the SATB and dirty card queues before a
1713 // JavaThread is added to the Java thread list. Right now, we don't
1714 // have to do anything to the dirty card queue (it should have been
1715 // activated when the thread was created), but we have to activate
1716 // the SATB queue if the thread is created while a marking cycle is
1717 // in progress. The activation / de-activation of the SATB queues at
1718 // the beginning / end of a marking cycle is done during safepoints
1719 // so we have to make sure this method is called outside one to be
1720 // able to safely read the active field of the SATB queue set. Right
1721 // now, it is called just before the thread is added to the Java
1722 // thread list in the Threads::add() method. That method is holding
1723 // the Threads_lock which ensures we are outside a safepoint. We
1724 // cannot do the obvious and set the active field of the SATB queue
1725 // when the thread is created given that, in some cases, safepoints
1726 // might happen between the JavaThread constructor being called and the
1727 // thread being added to the Java thread list (an example of this is
1728 // when the structure for the DestroyJavaVM thread is created).
1729 #if INCLUDE_ALL_GCS
|