< prev index next >

src/hotspot/share/runtime/thread.cpp

Print this page




 277   _hashStateY = 842502087;
 278   _hashStateZ = 0x8767;    // (int)(3579807591LL & 0xffff) ;
 279   _hashStateW = 273326509;
 280 
 281   _OnTrap   = 0;
 282   _Stalled  = 0;
 283   _TypeTag  = 0x2BAD;
 284 
 285   // Many of the following fields are effectively final - immutable
 286   // Note that nascent threads can't use the Native Monitor-Mutex
 287   // construct until the _MutexEvent is initialized ...
 288   // CONSIDER: instead of using a fixed set of purpose-dedicated ParkEvents
 289   // we might instead use a stack of ParkEvents that we could provision on-demand.
 290   // The stack would act as a cache to avoid calls to ParkEvent::Allocate()
 291   // and ::Release()
 292   _ParkEvent   = ParkEvent::Allocate(this);
 293   _SleepEvent  = ParkEvent::Allocate(this);
 294   _MutexEvent  = ParkEvent::Allocate(this);
 295   _MuxEvent    = ParkEvent::Allocate(this);
 296 
 297   _buffered_values_dealiaser = NULL;
 298 
 299 #ifdef CHECK_UNHANDLED_OOPS
 300   if (CheckUnhandledOops) {
 301     _unhandled_oops = new UnhandledOops(this);
 302   }
 303 #endif // CHECK_UNHANDLED_OOPS
 304 #ifdef ASSERT
 305   if (UseBiasedLocking) {
 306     assert((((uintptr_t) this) & (markOopDesc::biased_lock_alignment - 1)) == 0, "forced alignment of thread object failed");
 307     assert(this == _real_malloc_address ||
 308            this == align_up(_real_malloc_address, (int)markOopDesc::biased_lock_alignment),
 309            "bug in forced alignment of thread objects");
 310   }
 311 #endif // ASSERT
 312 
 313   // Notify the barrier set that a thread is being created. Note that some
 314   // threads are created before a barrier set is available. The call to
 315   // BarrierSet::on_thread_create() for these threads is therefore deferred
 316   // to BarrierSet::set_barrier_set().
 317   BarrierSet* const barrier_set = BarrierSet::barrier_set();
 318   if (barrier_set != NULL) {


 916     st->print(" \"%s\"", name());
 917   }
 918 
 919   st->print(" [stack: " PTR_FORMAT "," PTR_FORMAT "]",
 920             p2i(stack_end()), p2i(stack_base()));
 921 
 922   if (osthread()) {
 923     st->print(" [id=%d]", osthread()->thread_id());
 924   }
 925 
 926   ThreadsSMRSupport::print_info_on(this, st);
 927 }
 928 
 929 void Thread::print_value_on(outputStream* st) const {
 930   if (is_Named_thread()) {
 931     st->print(" \"%s\" ", name());
 932   }
 933   st->print(INTPTR_FORMAT, p2i(this));   // print address
 934 }
 935 
 936 void JavaThread::print_vt_buffer_stats_on(outputStream* st) const {
 937   st->print_cr("%s:", this->name());
 938   st->print_cr("\tChunks in use       : %d", vtchunk_in_use());
 939   st->print_cr("\tCached chunk        : %d", local_free_chunk() == NULL ? 0 : 1);
 940   st->print_cr("\tMax chunks          : %d", vtchunk_max());
 941   st->print_cr("\tReturned chunks     : %d", vtchunk_total_returned());
 942   st->print_cr("\tFailed chunk allocs : %d", vtchunk_total_failed());
 943   st->print_cr("\tMemory buffered     : " JLONG_FORMAT, vtchunk_total_memory_buffered());
 944   st->print_cr("");
 945 }
 946 
 947 #ifdef ASSERT
 948 void Thread::print_owned_locks_on(outputStream* st) const {
 949   Monitor *cur = _owned_locks;
 950   if (cur == NULL) {
 951     st->print(" (no locks) ");
 952   } else {
 953     st->print_cr(" Locks owned:");
 954     while (cur) {
 955       cur->print_on(st);
 956       cur = cur->next();
 957     }
 958   }
 959 }
 960 
// NOTE(review): file-scoped counter with no uses visible in this chunk --
// confirm it is still referenced elsewhere in the file before removing.
static int ref_use_count  = 0;
 962 
 963 bool Thread::owns_locks_but_compiled_lock() const {
 964   for (Monitor *cur = _owned_locks; cur; cur = cur->next()) {
 965     if (cur != Compile_lock) return true;
 966   }


1593   _parker = Parker::Allocate(this);
1594 
1595 #ifndef PRODUCT
1596   _jmp_ring_index = 0;
1597   for (int ji = 0; ji < jump_ring_buffer_size; ji++) {
1598     record_jump(NULL, NULL, NULL, 0);
1599   }
1600 #endif // PRODUCT
1601 
1602   // Setup safepoint state info for this thread
1603   ThreadSafepointState::create(this);
1604 
1605   debug_only(_java_call_counter = 0);
1606 
1607   // JVMTI PopFrame support
1608   _popframe_condition = popframe_inactive;
1609   _popframe_preserved_args = NULL;
1610   _popframe_preserved_args_size = 0;
1611   _frames_to_pop_failed_realloc = 0;
1612 
1613   // Buffered value types support
1614   _vt_alloc_ptr = NULL;
1615   _vt_alloc_limit = NULL;
1616   _local_free_chunk = NULL;
1617   _current_vtbuffer_mark = VTBuffer::mark_A;
1618   // Buffered value types instrumentation support
1619   _vtchunk_in_use = 0;
1620   _vtchunk_max = 0;
1621   _vtchunk_total_returned = 0;
1622   _vtchunk_total_failed = 0;
1623   _vtchunk_total_memory_buffered = 0;
1624 
1625   if (SafepointMechanism::uses_thread_local_poll()) {
1626     SafepointMechanism::initialize_header(this);
1627   }
1628 
1629   pd_initialize();
1630 }
1631 
1632 JavaThread::JavaThread(bool is_attaching_via_jni) :
1633                        Thread() {
1634   initialize();
1635   if (is_attaching_via_jni) {
1636     _jni_attach_state = _attaching_via_jni;
1637   } else {
1638     _jni_attach_state = _not_attaching_via_jni;
1639   }
1640   assert(deferred_card_mark().is_empty(), "Default MemRegion ctor");
1641 }
1642 
1643 bool JavaThread::reguard_stack(address cur_sp) {
1644   if (_stack_guard_state != stack_guard_yellow_reserved_disabled


1730     delete old_array;
1731   }
1732 
1733   GrowableArray<jvmtiDeferredLocalVariableSet*>* deferred = deferred_locals();
1734   if (deferred != NULL) {
1735     // This can only happen if thread is destroyed before deoptimization occurs.
1736     assert(deferred->length() != 0, "empty array!");
1737     do {
1738       jvmtiDeferredLocalVariableSet* dlv = deferred->at(0);
1739       deferred->remove_at(0);
1740       // individual jvmtiDeferredLocalVariableSet are CHeapObj's
1741       delete dlv;
1742     } while (deferred->length() != 0);
1743     delete deferred;
1744   }
1745 
1746   // All Java related clean up happens in exit
1747   ThreadSafepointState::destroy(this);
1748   if (_thread_stat != NULL) delete _thread_stat;
1749 
1750   if (_vt_alloc_ptr != NULL) {
1751     VTBufferChunk* chunk = VTBufferChunk::chunk(_vt_alloc_ptr);
1752     while (chunk != NULL) {
1753       VTBufferChunk* temp = chunk->prev();
1754       VTBuffer::recycle_chunk(this, chunk);
1755       chunk = temp;
1756     }
1757     _vt_alloc_ptr = NULL;
1758     _vt_alloc_limit = NULL;
1759   }
1760 
1761 #if INCLUDE_JVMCI
1762   if (JVMCICounterSize > 0) {
1763     if (jvmci_counters_include(this)) {
1764       for (int i = 0; i < JVMCICounterSize; i++) {
1765         _jvmci_old_thread_counters[i] += _jvmci_counters[i];
1766       }
1767     }
1768     FREE_C_HEAP_ARRAY(jlong, _jvmci_counters);
1769   }
1770 #endif // INCLUDE_JVMCI
1771 }
1772 
1773 
1774 // The first routine called by a new Java thread
1775 void JavaThread::run() {
1776   // initialize thread-local alloc buffer related fields
1777   this->initialize_tlab();
1778 
1779   // used to test validity of stack trace backs
1780   this->record_base_of_stack_pointer();


2722   if (!os::guard_memory((char *) base, stack_red_zone_size())) {
2723     warning("Attempt to guard stack red zone failed.");
2724   }
2725 }
2726 
2727 void JavaThread::disable_stack_red_zone() {
2728   // The base notation is from the stacks point of view, growing downward.
2729   // We need to adjust it to work correctly with guard_memory()
2730   assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
2731   address base = stack_red_zone_base() - stack_red_zone_size();
2732   if (!os::unguard_memory((char *)base, stack_red_zone_size())) {
2733     warning("Attempt to unguard stack red zone failed.");
2734   }
2735 }
2736 
2737 void JavaThread::frames_do(void f(frame*, const RegisterMap* map)) {
2738   // ignore is there is no stack
2739   if (!has_last_Java_frame()) return;
2740   // Because this method is used to verify oops, it must support
2741   // oops in buffered values
2742   BufferedValuesDealiaser dealiaser(this);
2743 
2744   // traverse the stack frames. Starts from top frame.
2745   for (StackFrameStream fst(this); !fst.is_done(); fst.next()) {
2746     frame* fr = fst.current();
2747     f(fr, fst.register_map());
2748   }
2749 }
2750 
2751 
2752 #ifndef PRODUCT
2753 // Deoptimization
2754 // Function for testing deoptimization
2755 void JavaThread::deoptimize() {
2756   // BiasedLocking needs an updated RegisterMap for the revoke monitors pass
2757   StackFrameStream fst(this, UseBiasedLocking);
2758   bool deopt = false;           // Dump stack only if a deopt actually happens.
2759   bool only_at = strlen(DeoptimizeOnlyAt) > 0;
2760   // Iterate over all frames in the thread and deoptimize
2761   for (; !fst.is_done(); fst.next()) {
2762     if (fst.current()->can_be_deoptimized()) {


2836     Thread* thread = Thread::current();
2837     if (thread->is_Named_thread()) {
2838       _cur_thr = (NamedThread *)thread;
2839       _cur_thr->set_processed_thread(jthr);
2840     } else {
2841       _cur_thr = NULL;
2842     }
2843   }
2844 
2845   ~RememberProcessedThread() {
2846     if (_cur_thr) {
2847       _cur_thr->set_processed_thread(NULL);
2848     }
2849   }
2850 };
2851 
// Apply the oop closure f (and code blob closure cf) to every oop reachable
// from this thread: GC handles, the privileged stack, registered growable
// arrays, monitor chunks, the execution stack, deferred locals, and finally
// the thread's own oop fields. Oops living in the value-type buffer are
// visited through the dealiaser instead of being handed to f directly.
void JavaThread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
  // Verify that the deferred card marks have been flushed.
  assert(deferred_card_mark().is_empty(), "Should be empty during GC");

  // Needed to correctly visit oops stored inside buffered values.
  BufferedValuesDealiaser dealiaser(this);

  // Traverse the GCHandles
  Thread::oops_do(f, cf);

  assert((!has_last_Java_frame() && java_call_counter() == 0) ||
         (has_last_Java_frame() && java_call_counter() > 0), "wrong java_sp info!");

  if (has_last_Java_frame()) {
    // Record JavaThread to GC thread
    RememberProcessedThread rpt(this);

    // Traverse the privileged stack
    if (_privileged_stack_top != NULL) {
      _privileged_stack_top->oops_do(f);
    }

    // traverse the registered growable array
    if (_array_for_gc != NULL) {
      for (int index = 0; index < _array_for_gc->length(); index++) {
        if (!VTBuffer::is_in_vt_buffer(_array_for_gc->at(index))) {
         f->do_oop(_array_for_gc->adr_at(index));
        } else {
          // Element lives in the value-type buffer: visit it via the
          // dealiaser rather than passing the buffered address to f.
          oop value = _array_for_gc->at(index);
          assert(value->is_value(), "Sanity check");
          dealiaser.oops_do(f, value);
        }
      }
    }

    // Traverse the monitor chunks
    for (MonitorChunk* chunk = monitor_chunks(); chunk != NULL; chunk = chunk->next()) {
      chunk->oops_do(f);
    }

    // Traverse the execution stack
    for (StackFrameStream fst(this); !fst.is_done(); fst.next()) {
      fst.current()->oops_do(f, cf, fst.register_map());
    }
  }

  // callee_target is never live across a gc point so NULL it here should
  // it still contain a methodOop.

  set_callee_target(NULL);

  assert(vframe_array_head() == NULL, "deopt in progress at a safepoint!");
  // If we have deferred set_locals there might be oops waiting to be
  // written
  GrowableArray<jvmtiDeferredLocalVariableSet*>* list = deferred_locals();
  if (list != NULL) {
    for (int i = 0; i < list->length(); i++) {
      list->at(i)->oops_do(f);
    }
  }

  // Traverse instance variables at the end since the GC may be moving things
  // around using this function
  f->do_oop((oop*) &_threadObj);
  if (!VTBuffer::is_in_vt_buffer(_vm_result)) {
    f->do_oop((oop*) &_vm_result);
  } else {
    // _vm_result is a buffered value: visit it via the dealiaser.
    assert(_vm_result->is_value(), "Must be a value");
    dealiaser.oops_do(f, _vm_result);
  }
  f->do_oop((oop*) &_exception_oop);
  f->do_oop((oop*) &_pending_async_exception);

  if (jvmti_thread_state() != NULL) {
    jvmti_thread_state()->oops_do(f);
  }
}
2928 
2929 void JavaThread::nmethods_do(CodeBlobClosure* cf) {
2930   assert((!has_last_Java_frame() && java_call_counter() == 0) ||
2931          (has_last_Java_frame() && java_call_counter() > 0), "wrong java_sp info!");
2932 
2933   if (has_last_Java_frame()) {
2934     // Traverse the execution stack
2935     for (StackFrameStream fst(this); !fst.is_done(); fst.next()) {
2936       fst.current()->nmethods_do(cf);
2937     }
2938   }
2939 }
2940 


5024     // The following CAS() releases the lock and pops the head element.
5025     // The CAS() also ratifies the previously fetched lock-word value.
5026     if (Atomic::cmpxchg(intptr_t(nxt), Lock, w) != w) {
5027       continue;
5028     }
5029     List->OnList = 0;
5030     OrderAccess::fence();
5031     List->unpark();
5032     return;
5033   }
5034 }
5035 
5036 
5037 void Threads::verify() {
5038   ALL_JAVA_THREADS(p) {
5039     p->verify();
5040   }
5041   VMThread* thread = VMThread::vm_thread();
5042   if (thread != NULL) thread->verify();
5043 }
5044 
5045 void Threads::print_vt_buffer_stats_on(outputStream* st) {
5046   ALL_JAVA_THREADS(p) {
5047     p->print_vt_buffer_stats_on(st);
5048   }
5049 }


 277   _hashStateY = 842502087;
 278   _hashStateZ = 0x8767;    // (int)(3579807591LL & 0xffff) ;
 279   _hashStateW = 273326509;
 280 
 281   _OnTrap   = 0;
 282   _Stalled  = 0;
 283   _TypeTag  = 0x2BAD;
 284 
 285   // Many of the following fields are effectively final - immutable
 286   // Note that nascent threads can't use the Native Monitor-Mutex
 287   // construct until the _MutexEvent is initialized ...
 288   // CONSIDER: instead of using a fixed set of purpose-dedicated ParkEvents
 289   // we might instead use a stack of ParkEvents that we could provision on-demand.
 290   // The stack would act as a cache to avoid calls to ParkEvent::Allocate()
 291   // and ::Release()
 292   _ParkEvent   = ParkEvent::Allocate(this);
 293   _SleepEvent  = ParkEvent::Allocate(this);
 294   _MutexEvent  = ParkEvent::Allocate(this);
 295   _MuxEvent    = ParkEvent::Allocate(this);
 296 


 297 #ifdef CHECK_UNHANDLED_OOPS
 298   if (CheckUnhandledOops) {
 299     _unhandled_oops = new UnhandledOops(this);
 300   }
 301 #endif // CHECK_UNHANDLED_OOPS
 302 #ifdef ASSERT
 303   if (UseBiasedLocking) {
 304     assert((((uintptr_t) this) & (markOopDesc::biased_lock_alignment - 1)) == 0, "forced alignment of thread object failed");
 305     assert(this == _real_malloc_address ||
 306            this == align_up(_real_malloc_address, (int)markOopDesc::biased_lock_alignment),
 307            "bug in forced alignment of thread objects");
 308   }
 309 #endif // ASSERT
 310 
 311   // Notify the barrier set that a thread is being created. Note that some
 312   // threads are created before a barrier set is available. The call to
 313   // BarrierSet::on_thread_create() for these threads is therefore deferred
 314   // to BarrierSet::set_barrier_set().
 315   BarrierSet* const barrier_set = BarrierSet::barrier_set();
 316   if (barrier_set != NULL) {


 914     st->print(" \"%s\"", name());
 915   }
 916 
 917   st->print(" [stack: " PTR_FORMAT "," PTR_FORMAT "]",
 918             p2i(stack_end()), p2i(stack_base()));
 919 
 920   if (osthread()) {
 921     st->print(" [id=%d]", osthread()->thread_id());
 922   }
 923 
 924   ThreadsSMRSupport::print_info_on(this, st);
 925 }
 926 
 927 void Thread::print_value_on(outputStream* st) const {
 928   if (is_Named_thread()) {
 929     st->print(" \"%s\" ", name());
 930   }
 931   st->print(INTPTR_FORMAT, p2i(this));   // print address
 932 }
 933 











 934 #ifdef ASSERT
 935 void Thread::print_owned_locks_on(outputStream* st) const {
 936   Monitor *cur = _owned_locks;
 937   if (cur == NULL) {
 938     st->print(" (no locks) ");
 939   } else {
 940     st->print_cr(" Locks owned:");
 941     while (cur) {
 942       cur->print_on(st);
 943       cur = cur->next();
 944     }
 945   }
 946 }
 947 
// NOTE(review): file-scoped counter with no uses visible in this chunk --
// confirm it is still referenced elsewhere in the file before removing.
static int ref_use_count  = 0;
 949 
 950 bool Thread::owns_locks_but_compiled_lock() const {
 951   for (Monitor *cur = _owned_locks; cur; cur = cur->next()) {
 952     if (cur != Compile_lock) return true;
 953   }


1580   _parker = Parker::Allocate(this);
1581 
1582 #ifndef PRODUCT
1583   _jmp_ring_index = 0;
1584   for (int ji = 0; ji < jump_ring_buffer_size; ji++) {
1585     record_jump(NULL, NULL, NULL, 0);
1586   }
1587 #endif // PRODUCT
1588 
1589   // Setup safepoint state info for this thread
1590   ThreadSafepointState::create(this);
1591 
1592   debug_only(_java_call_counter = 0);
1593 
1594   // JVMTI PopFrame support
1595   _popframe_condition = popframe_inactive;
1596   _popframe_preserved_args = NULL;
1597   _popframe_preserved_args_size = 0;
1598   _frames_to_pop_failed_realloc = 0;
1599 












1600   if (SafepointMechanism::uses_thread_local_poll()) {
1601     SafepointMechanism::initialize_header(this);
1602   }
1603 
1604   pd_initialize();
1605 }
1606 
1607 JavaThread::JavaThread(bool is_attaching_via_jni) :
1608                        Thread() {
1609   initialize();
1610   if (is_attaching_via_jni) {
1611     _jni_attach_state = _attaching_via_jni;
1612   } else {
1613     _jni_attach_state = _not_attaching_via_jni;
1614   }
1615   assert(deferred_card_mark().is_empty(), "Default MemRegion ctor");
1616 }
1617 
1618 bool JavaThread::reguard_stack(address cur_sp) {
1619   if (_stack_guard_state != stack_guard_yellow_reserved_disabled


1705     delete old_array;
1706   }
1707 
1708   GrowableArray<jvmtiDeferredLocalVariableSet*>* deferred = deferred_locals();
1709   if (deferred != NULL) {
1710     // This can only happen if thread is destroyed before deoptimization occurs.
1711     assert(deferred->length() != 0, "empty array!");
1712     do {
1713       jvmtiDeferredLocalVariableSet* dlv = deferred->at(0);
1714       deferred->remove_at(0);
1715       // individual jvmtiDeferredLocalVariableSet are CHeapObj's
1716       delete dlv;
1717     } while (deferred->length() != 0);
1718     delete deferred;
1719   }
1720 
1721   // All Java related clean up happens in exit
1722   ThreadSafepointState::destroy(this);
1723   if (_thread_stat != NULL) delete _thread_stat;
1724 











1725 #if INCLUDE_JVMCI
1726   if (JVMCICounterSize > 0) {
1727     if (jvmci_counters_include(this)) {
1728       for (int i = 0; i < JVMCICounterSize; i++) {
1729         _jvmci_old_thread_counters[i] += _jvmci_counters[i];
1730       }
1731     }
1732     FREE_C_HEAP_ARRAY(jlong, _jvmci_counters);
1733   }
1734 #endif // INCLUDE_JVMCI
1735 }
1736 
1737 
1738 // The first routine called by a new Java thread
1739 void JavaThread::run() {
1740   // initialize thread-local alloc buffer related fields
1741   this->initialize_tlab();
1742 
1743   // used to test validity of stack trace backs
1744   this->record_base_of_stack_pointer();


2686   if (!os::guard_memory((char *) base, stack_red_zone_size())) {
2687     warning("Attempt to guard stack red zone failed.");
2688   }
2689 }
2690 
2691 void JavaThread::disable_stack_red_zone() {
2692   // The base notation is from the stacks point of view, growing downward.
2693   // We need to adjust it to work correctly with guard_memory()
2694   assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
2695   address base = stack_red_zone_base() - stack_red_zone_size();
2696   if (!os::unguard_memory((char *)base, stack_red_zone_size())) {
2697     warning("Attempt to unguard stack red zone failed.");
2698   }
2699 }
2700 
2701 void JavaThread::frames_do(void f(frame*, const RegisterMap* map)) {
2702   // ignore is there is no stack
2703   if (!has_last_Java_frame()) return;
2704   // Because this method is used to verify oops, it must support
2705   // oops in buffered values

2706 
2707   // traverse the stack frames. Starts from top frame.
2708   for (StackFrameStream fst(this); !fst.is_done(); fst.next()) {
2709     frame* fr = fst.current();
2710     f(fr, fst.register_map());
2711   }
2712 }
2713 
2714 
2715 #ifndef PRODUCT
2716 // Deoptimization
2717 // Function for testing deoptimization
2718 void JavaThread::deoptimize() {
2719   // BiasedLocking needs an updated RegisterMap for the revoke monitors pass
2720   StackFrameStream fst(this, UseBiasedLocking);
2721   bool deopt = false;           // Dump stack only if a deopt actually happens.
2722   bool only_at = strlen(DeoptimizeOnlyAt) > 0;
2723   // Iterate over all frames in the thread and deoptimize
2724   for (; !fst.is_done(); fst.next()) {
2725     if (fst.current()->can_be_deoptimized()) {


2799     Thread* thread = Thread::current();
2800     if (thread->is_Named_thread()) {
2801       _cur_thr = (NamedThread *)thread;
2802       _cur_thr->set_processed_thread(jthr);
2803     } else {
2804       _cur_thr = NULL;
2805     }
2806   }
2807 
2808   ~RememberProcessedThread() {
2809     if (_cur_thr) {
2810       _cur_thr->set_processed_thread(NULL);
2811     }
2812   }
2813 };
2814 
// Apply the oop closure f (and code blob closure cf) to every oop reachable
// from this thread: GC handles, the privileged stack, registered growable
// arrays, monitor chunks, the execution stack, deferred locals, and finally
// the thread's own oop fields.
void JavaThread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
  // Verify that the deferred card marks have been flushed.
  assert(deferred_card_mark().is_empty(), "Should be empty during GC");

  // Traverse the GCHandles
  Thread::oops_do(f, cf);

  assert((!has_last_Java_frame() && java_call_counter() == 0) ||
         (has_last_Java_frame() && java_call_counter() > 0), "wrong java_sp info!");

  if (has_last_Java_frame()) {
    // Record JavaThread to GC thread
    RememberProcessedThread rpt(this);

    // Traverse the privileged stack
    if (_privileged_stack_top != NULL) {
      _privileged_stack_top->oops_do(f);
    }

    // traverse the registered growable array
    if (_array_for_gc != NULL) {
      for (int index = 0; index < _array_for_gc->length(); index++) {
        f->do_oop(_array_for_gc->adr_at(index));
      }
    }

    // Traverse the monitor chunks
    for (MonitorChunk* chunk = monitor_chunks(); chunk != NULL; chunk = chunk->next()) {
      chunk->oops_do(f);
    }

    // Traverse the execution stack
    for (StackFrameStream fst(this); !fst.is_done(); fst.next()) {
      fst.current()->oops_do(f, cf, fst.register_map());
    }
  }

  // callee_target is never live across a gc point so NULL it here should
  // it still contain a methodOop.

  set_callee_target(NULL);

  assert(vframe_array_head() == NULL, "deopt in progress at a safepoint!");
  // If we have deferred set_locals there might be oops waiting to be
  // written
  GrowableArray<jvmtiDeferredLocalVariableSet*>* list = deferred_locals();
  if (list != NULL) {
    for (int i = 0; i < list->length(); i++) {
      list->at(i)->oops_do(f);
    }
  }

  // Traverse instance variables at the end since the GC may be moving things
  // around using this function
  f->do_oop((oop*) &_threadObj);
  f->do_oop((oop*) &_vm_result);
  f->do_oop((oop*) &_exception_oop);
  f->do_oop((oop*) &_pending_async_exception);

  if (jvmti_thread_state() != NULL) {
    jvmti_thread_state()->oops_do(f);
  }
}
2878 
2879 void JavaThread::nmethods_do(CodeBlobClosure* cf) {
2880   assert((!has_last_Java_frame() && java_call_counter() == 0) ||
2881          (has_last_Java_frame() && java_call_counter() > 0), "wrong java_sp info!");
2882 
2883   if (has_last_Java_frame()) {
2884     // Traverse the execution stack
2885     for (StackFrameStream fst(this); !fst.is_done(); fst.next()) {
2886       fst.current()->nmethods_do(cf);
2887     }
2888   }
2889 }
2890 


4974     // The following CAS() releases the lock and pops the head element.
4975     // The CAS() also ratifies the previously fetched lock-word value.
4976     if (Atomic::cmpxchg(intptr_t(nxt), Lock, w) != w) {
4977       continue;
4978     }
4979     List->OnList = 0;
4980     OrderAccess::fence();
4981     List->unpark();
4982     return;
4983   }
4984 }
4985 
4986 
4987 void Threads::verify() {
4988   ALL_JAVA_THREADS(p) {
4989     p->verify();
4990   }
4991   VMThread* thread = VMThread::vm_thread();
4992   if (thread != NULL) thread->verify();
4993 }






< prev index next >