src/share/vm/runtime/thread.cpp




 306   // store information for the new thread.
 307 
 308   // initialize structure dependent on thread local storage
 309   ThreadLocalStorage::set_thread(this);
 310 }
 311 
 312 void Thread::record_stack_base_and_size() {
 313   set_stack_base(os::current_stack_base());
 314   set_stack_size(os::current_stack_size());
 315   // CR 7190089: on Solaris, the primordial thread's stack is adjusted
 316   // in initialize_thread(). Without the adjustment, the stack size is
 317   // incorrect if the stack is set to unlimited (ulimit -s unlimited).
 318   // So far, only Solaris has a real implementation of initialize_thread().
 319   //
 320   // set up any platform-specific state.
 321   os::initialize_thread(this);
 322 
 323 #if INCLUDE_NMT
 324   // record thread's native stack, stack grows downward
 325   address stack_low_addr = stack_base() - stack_size();
 326   MemTracker::record_thread_stack(stack_low_addr, stack_size(), this,
 327       CURRENT_PC);
 328 #endif // INCLUDE_NMT
 329 }
 330 
 331 
 332 Thread::~Thread() {
 333   // Reclaim the objectmonitors from the omFreeList of the moribund thread.
 334   ObjectSynchronizer::omFlush (this) ;
 335 
 336   // stack_base can be NULL if the thread is never started or exits before
 337   // record_stack_base_and_size() is called. Although we would like to ensure
 338   // that all started threads do call record_stack_base_and_size(), there is
 339   // no proper way to enforce that.
 340 #if INCLUDE_NMT
 341   if (_stack_base != NULL) {
 342     address low_stack_addr = stack_base() - stack_size();
 343     MemTracker::release_thread_stack(low_stack_addr, stack_size(), this);
 344 #ifdef ASSERT
 345     set_stack_base(NULL);
 346 #endif
 347   }
 348 #endif // INCLUDE_NMT
 349 
 350   // deallocate data structures
 351   delete resource_area();
 352   // Since the handle marks are using the handle area, we have to deallocate the root
 353   // handle mark before deallocating the thread's handle area.
 354   assert(last_handle_mark() != NULL, "check we have an element");
 355   delete last_handle_mark();
 356   assert(last_handle_mark() == NULL, "check we have reached the end");
 357 
 358   // It's possible we can encounter a null _ParkEvent, etc., in stillborn threads.
 359   // We NULL out the fields for good hygiene.
 360   ParkEvent::Release (_ParkEvent)   ; _ParkEvent   = NULL ;
 361   ParkEvent::Release (_SleepEvent)  ; _SleepEvent  = NULL ;
 362   ParkEvent::Release (_MutexEvent)  ; _MutexEvent  = NULL ;
 363   ParkEvent::Release (_MuxEvent)    ; _MuxEvent    = NULL ;




 306   // store information for the new thread.
 307 
 308   // initialize structure dependent on thread local storage
 309   ThreadLocalStorage::set_thread(this);
 310 }
 311 
 312 void Thread::record_stack_base_and_size() {
 313   set_stack_base(os::current_stack_base());
 314   set_stack_size(os::current_stack_size());
 315   // CR 7190089: on Solaris, the primordial thread's stack is adjusted
 316   // in initialize_thread(). Without the adjustment, the stack size is
 317   // incorrect if the stack is set to unlimited (ulimit -s unlimited).
 318   // So far, only Solaris has a real implementation of initialize_thread().
 319   //
 320   // set up any platform-specific state.
 321   os::initialize_thread(this);
 322 
 323 #if INCLUDE_NMT
 324   // record thread's native stack, stack grows downward
 325   address stack_low_addr = stack_base() - stack_size();
 326   NMTTrackOp op(NMTTrackOp::StackAllocOp, this);
 327   op.execute_op((address)stack_low_addr, stack_size(), mtThreadStack, CURRENT_PC);
 328 #endif // INCLUDE_NMT
 329 }
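
Both the old and the new tracking call hand NMT the same region. Because thread stacks grow downward on the platforms this code supports, the lowest tracked address is stack_base() - stack_size() and the region is stack_size() bytes ending at the base. A minimal standalone sketch of that arithmetic follows; it is illustrative only, and the helper name and stand-in typedef are not part of the patch.

    #include <cassert>
    #include <cstddef>

    typedef unsigned char* address;   // stand-in for HotSpot's 'address' typedef

    // Given a downward-growing stack described by its base (highest address)
    // and its size, return the lowest address of the region reported to NMT,
    // i.e. the same value as 'stack_low_addr' in the code above.
    static address nmt_stack_low_addr(address stack_base, size_t stack_size) {
      address low = stack_base - stack_size;
      assert(low + stack_size == stack_base && "region must end at the stack base");
      return low;   // the tracked region is [low, stack_base), stack_size bytes
    }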
 330 
 331 
 332 Thread::~Thread() {
 333   // Reclaim the objectmonitors from the omFreeList of the moribund thread.
 334   ObjectSynchronizer::omFlush (this) ;
 335 
 336   // stack_base can be NULL if the thread is never started or exits before
 337   // record_stack_base_and_size() is called. Although we would like to ensure
 338   // that all started threads do call record_stack_base_and_size(), there is
 339   // no proper way to enforce that.
 340 #if INCLUDE_NMT
 341   if (_stack_base != NULL) {
 342     address low_stack_addr = stack_base() - stack_size();
 343     NMTTrackOp op(NMTTrackOp::StackReleaseOp, this);
 344     op.execute_op((address)low_stack_addr, stack_size());
 345 #ifdef ASSERT
 346     set_stack_base(NULL);
 347 #endif
 348   }
 349 #endif // INCLUDE_NMT
 350 
 351   // deallocate data structures
 352   delete resource_area();
 353   // Since the handle marks are using the handle area, we have to deallocate the root
 354   // handle mark before deallocating the thread's handle area.
 355   assert(last_handle_mark() != NULL, "check we have an element");
 356   delete last_handle_mark();
 357   assert(last_handle_mark() == NULL, "check we have reached the end");
 358 
 359   // It's possible we can encounter a null _ParkEvent, etc., in stillborn threads.
 360   // We NULL out the fields for good hygiene.
 361   ParkEvent::Release (_ParkEvent)   ; _ParkEvent   = NULL ;
 362   ParkEvent::Release (_SleepEvent)  ; _SleepEvent  = NULL ;
 363   ParkEvent::Release (_MutexEvent)  ; _MutexEvent  = NULL ;
 364   ParkEvent::Release (_MuxEvent)    ; _MuxEvent    = NULL ;
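
The change shown on this page replaces the direct MemTracker::record_thread_stack() and MemTracker::release_thread_stack() calls with an NMTTrackOp whose kind (StackAllocOp or StackReleaseOp) and owning thread are bound at construction, and whose execute_op() receives the address range (plus the memory flag and call site for the allocation case). The real NMTTrackOp is defined elsewhere in this patch and is not shown here; the sketch below is a hypothetical outline of the shape those two call sites assume, with stand-ins for the HotSpot types it touches.

    #include <cstddef>

    typedef unsigned char* address;            // stand-in for HotSpot's 'address'
    class Thread;                              // from the surrounding HotSpot code
    enum MEMFLAGS { mtThreadStack /* other flags elided */ };  // stand-in only

    // Hypothetical command-style wrapper: the operation kind and the thread it
    // applies to are fixed when the op is constructed, and execute_op() then
    // supplies the stack region.
    class NMTTrackOpSketch {
     public:
      enum Kind { StackAllocOp, StackReleaseOp };

      NMTTrackOpSketch(Kind kind, Thread* thread) : _kind(kind), _thread(thread) { }

      // Allocation ops also carry the memory flag and the recording call site,
      // matching the four-argument call in record_stack_base_and_size().
      void execute_op(address addr, size_t size, MEMFLAGS flags, address pc) {
        // would record [addr, addr + size) as thread stack for _thread
      }

      // Release ops only need the range, matching the two-argument call in the
      // destructor.
      void execute_op(address addr, size_t size) {
        // would stop tracking [addr, addr + size) for _thread
      }

     private:
      Kind    _kind;
      Thread* _thread;
    };

Where the old code called the tracker directly, the new code funnels both the record and the release through the same op type; the kind passed at construction is what distinguishes the two call sites above.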

