
src/share/vm/runtime/thread.cpp





#ifndef USE_LIBRARY_BASED_TLS_ONLY
// Current thread is maintained as a thread-local variable
THREAD_LOCAL_DECL Thread* Thread::_thr_current = NULL;
#endif

// Class hierarchy
// - Thread
//   - VMThread
//   - WatcherThread
//   - ConcurrentMarkSweepThread
//   - JavaThread
//     - CompilerThread

// ======= Thread ========
// Support for forcing alignment of thread objects for biased locking
void* Thread::allocate(size_t size, bool throw_excpt, MEMFLAGS flags) {
  if (UseBiasedLocking) {
    const int alignment = markOopDesc::biased_lock_alignment;
    size_t aligned_size = size + (alignment - sizeof(intptr_t));
    void* real_malloc_addr = throw_excpt? AllocateHeap(aligned_size, flags, CURRENT_PC)
                                        : AllocateHeap(aligned_size, flags, CURRENT_PC,
                                                       AllocFailStrategy::RETURN_NULL);
    void* aligned_addr     = align_up(real_malloc_addr, alignment);
    assert(((uintptr_t) aligned_addr + (uintptr_t) size) <=
           ((uintptr_t) real_malloc_addr + (uintptr_t) aligned_size),
           "JavaThread alignment code overflowed allocated storage");
    if (aligned_addr != real_malloc_addr) {
      log_info(biasedlocking)("Aligned thread " INTPTR_FORMAT " to " INTPTR_FORMAT,
                              p2i(real_malloc_addr),
                              p2i(aligned_addr));
    }
    ((Thread*) aligned_addr)->_real_malloc_address = real_malloc_addr;
    return aligned_addr;
  } else {
    return throw_excpt? AllocateHeap(size, flags, CURRENT_PC)
                      : AllocateHeap(size, flags, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
  }
}
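
// Illustration (not part of thread.cpp): the over-allocate-and-align pattern
// used by Thread::allocate() above, as a standalone sketch with hypothetical
// names. Like the code above, it assumes the underlying allocator returns
// storage that is already sizeof(intptr_t)-aligned, so padding by
// (alignment - sizeof(intptr_t)) bytes always leaves room to round the
// pointer up to a power-of-two `alignment`.
static void* allocate_aligned_sketch(size_t size, size_t alignment) {
  size_t padded = size + (alignment - sizeof(intptr_t));
  uintptr_t raw = (uintptr_t) ::malloc(padded);            // >= sizeof(intptr_t)-aligned
  if (raw == 0) return NULL;
  // Round up to the next multiple of alignment (a power of two).
  uintptr_t aligned = (raw + alignment - 1) & ~(alignment - 1);
  // The padding guarantees [aligned, aligned + size) fits in the block; the
  // caller must keep `raw` for free(), which is why Thread records it in
  // _real_malloc_address.
  return (void*) aligned;
}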

void Thread::operator delete(void* p) {
  if (UseBiasedLocking) {
    void* real_malloc_addr = ((Thread*) p)->_real_malloc_address;
    FreeHeap(real_malloc_addr);
  } else {
    FreeHeap(p);
  }
}
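
// Worked example (illustration): if AllocateHeap had returned 0x1008 and the
// biased-lock alignment were 256, allocate() would hand out 0x1100. Freeing
// 0x1100 directly would corrupt the C heap, which only knows about 0x1008;
// stashing the original address in _real_malloc_address lets this operator
// delete recover and free the true allocation.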


  // Note that nascent threads can't use the Native Monitor-Mutex
  // construct until the _MutexEvent is initialized ...
  // CONSIDER: rather than a fixed set of purpose-dedicated ParkEvents,
  // we could use a stack of ParkEvents provisioned on demand. The stack
  // would act as a cache, avoiding calls to ParkEvent::Allocate() and
  // ::Release() (a sketch of such a cache follows this constructor).
  _ParkEvent   = ParkEvent::Allocate(this);
  _SleepEvent  = ParkEvent::Allocate(this);
  _MutexEvent  = ParkEvent::Allocate(this);
  _MuxEvent    = ParkEvent::Allocate(this);

#ifdef CHECK_UNHANDLED_OOPS
  if (CheckUnhandledOops) {
    _unhandled_oops = new UnhandledOops(this);
  }
#endif // CHECK_UNHANDLED_OOPS
#ifdef ASSERT
  if (UseBiasedLocking) {
    assert((((uintptr_t) this) & (markOopDesc::biased_lock_alignment - 1)) == 0,
           "forced alignment of thread object failed");
    assert(this == _real_malloc_address ||
           this == align_up(_real_malloc_address, (int)markOopDesc::biased_lock_alignment),
           "bug in forced alignment of thread objects");
  }
#endif // ASSERT
}
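
// Sketch (illustration, not HotSpot code) of the on-demand cache the
// CONSIDER note above proposes: a global stack of recycled events, so that
// a nascent thread pops from the cache instead of calling
// ParkEvent::Allocate() four times up front. `Event` is a hypothetical
// stand-in for ParkEvent; std::mutex (#include <mutex>) is used only to
// keep the sketch self-contained.
struct EventCacheSketch {
  struct Event { Event* next; /* park/unpark state would live here */ };
  Event*     _top = NULL;     // top of the free stack, NULL when empty
  std::mutex _lock;
  Event* get() {
    std::lock_guard<std::mutex> g(_lock);
    Event* e = _top;
    if (e != NULL) { _top = e->next; return e; }  // cache hit: reuse
    return new Event();                           // miss: allocate fresh
  }
  void release(Event* e) {
    std::lock_guard<std::mutex> g(_lock);
    e->next = _top;                               // push back for reuse
    _top = e;
  }
};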

void Thread::initialize_thread_current() {
#ifndef USE_LIBRARY_BASED_TLS_ONLY
  assert(_thr_current == NULL, "Thread::current already initialized");
  _thr_current = this;
#endif
  assert(ThreadLocalStorage::thread() == NULL, "ThreadLocalStorage::thread already initialized");
  ThreadLocalStorage::set_thread(this);
  assert(Thread::current() == ThreadLocalStorage::thread(), "TLS mismatch!");
}
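
// Illustration (not HotSpot code) of the two TLS levels kept in sync above.
// THREAD_LOCAL_DECL expands to a compiler-supported thread-local (the fast
// path for Thread::current()), while ThreadLocalStorage is the portable,
// library-based level (pthread keys on POSIX). A minimal sketch of the
// library level, with hypothetical names (#include <pthread.h>):
static pthread_key_t current_thread_key;  // created once with pthread_key_create()
static void set_current_thread(Thread* t) {
  pthread_setspecific(current_thread_key, t);
}
static Thread* current_thread() {
  return (Thread*) pthread_getspecific(current_thread_key);
}
// initialize_thread_current() stores `this` at both levels and asserts they
// agree, so either lookup path yields the same Thread*.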

void Thread::clear_thread_current() {
  assert(Thread::current() == ThreadLocalStorage::thread(), "TLS mismatch!");
#ifndef USE_LIBRARY_BASED_TLS_ONLY
  _thr_current = NULL;
#endif
  ThreadLocalStorage::set_thread(NULL);
}

