
src/share/vm/runtime/thread.cpp

rev 13526 : [mq]: 13512.patch


 320   // set up any platform-specific state.
 321   os::initialize_thread(this);
 322 
 323   // Set stack limits after thread is initialized.
 324   if (is_Java_thread()) {
 325     ((JavaThread*) this)->set_stack_overflow_limit();
 326     ((JavaThread*) this)->set_reserved_stack_activation(stack_base());
 327   }
 328 #if INCLUDE_NMT
 329   // record thread's native stack, stack grows downward
 330   MemTracker::record_thread_stack(stack_end(), stack_size());
 331 #endif // INCLUDE_NMT
 332   log_debug(os, thread)("Thread " UINTX_FORMAT " stack dimensions: "
 333     PTR_FORMAT "-" PTR_FORMAT " (" SIZE_FORMAT "k).",
 334     os::current_thread_id(), p2i(stack_base() - stack_size()),
 335     p2i(stack_base()), stack_size()/1024);
 336 }
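A minimal standalone sketch (hypothetical names, not HotSpot code) of the downward-growing stack layout that the NMT recording and the log line above rely on, where the tracked range runs from stack_base() - stack_size() up to stack_base():

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Hypothetical stand-in for the Thread stack accessors used above.
    struct StackRange {
      uintptr_t base;   // highest address; the stack grows down from here
      size_t    size;   // total stack size in bytes

      uintptr_t end() const { return base - size; }                         // lowest address, as handed to NMT
      bool contains(uintptr_t sp) const { return sp > end() && sp <= base; }
    };

    int main() {
      StackRange r{0x7ffe0000u, 512 * 1024};
      assert(r.contains(r.base - 4096));   // a live frame sits a little below the base
      assert(!r.contains(r.end() - 1));    // anything below the end is outside the stack
      return 0;
    }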
 337 
 338 
 339 Thread::~Thread() {



 340   EVENT_THREAD_DESTRUCT(this);
 341 
 342   // stack_base can be NULL if the thread is never started or exited before
 343   // record_stack_base_and_size is called. Although we would like to ensure
 344   // that all started threads do call record_stack_base_and_size(), there is
 345   // no proper way to enforce that.
 346 #if INCLUDE_NMT
 347   if (_stack_base != NULL) {
 348     MemTracker::release_thread_stack(stack_end(), stack_size());
 349 #ifdef ASSERT
 350     set_stack_base(NULL);
 351 #endif
 352   }
 353 #endif // INCLUDE_NMT
 354 
 355   // deallocate data structures
 356   delete resource_area();
 357   // Since the handle marks are using the handle area, we have to deallocate the root
 358   // handle mark before deallocating the thread's handle area.
 359   assert(last_handle_mark() != NULL, "check we have an element");


 773 // GC Support
 774 bool Thread::claim_oops_do_par_case(int strong_roots_parity) {
 775   jint thread_parity = _oops_do_parity;
 776   if (thread_parity != strong_roots_parity) {
 777     jint res = Atomic::cmpxchg(strong_roots_parity, &_oops_do_parity, thread_parity);
 778     if (res == thread_parity) {
 779       return true;
 780     } else {
 781       guarantee(res == strong_roots_parity, "Or else what?");
 782       return false;
 783     }
 784   }
 785   return false;
 786 }
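A self-contained sketch of the claim protocol above, using std::atomic in place of HotSpot's Atomic class (type and field names here are hypothetical): several GC workers may race on the same thread, but the compare-and-swap lets exactly one of them win per parity round.

    #include <atomic>
    #include <cassert>

    struct ClaimableThread {
      std::atomic<int> oops_do_parity{0};   // 0 means "never claimed"

      // Mirrors the logic above: true only for the caller whose CAS moves the
      // parity forward; later callers in the same round see the new value and back off.
      bool claim_oops_do(int strong_roots_parity) {
        int observed = oops_do_parity.load();
        if (observed == strong_roots_parity) return false;   // already claimed this round
        return oops_do_parity.compare_exchange_strong(observed, strong_roots_parity);
      }
    };

    int main() {
      ClaimableThread t;
      assert(t.claim_oops_do(1));    // first claim in round 1 succeeds
      assert(!t.claim_oops_do(1));   // repeated claims in the same round fail
      assert(t.claim_oops_do(2));    // the next round (new parity) can be claimed again
      return 0;
    }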
 787 
 788 void Thread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
 789   active_handles()->oops_do(f);
 790   // Do oop for ThreadShadow
 791   f->do_oop((oop*)&_pending_exception);
 792   handle_area()->oops_do(f);
 793   if (MonitorInUseLists) {
 794     VM_Operation* op = VMThread::vm_operation();
 795     if (op != NULL && op->deflates_idle_monitors()) {
 796       ObjectSynchronizer::deflate_idle_monitors_and_oops_do(this, f);
 797     } else {
 798       ObjectSynchronizer::thread_local_used_oops_do(this, f);
 799     }
 800   }
 801 }
 802 
 803 void Thread::metadata_handles_do(void f(Metadata*)) {
 804   // Only walk the Handles in Thread.
 805   if (metadata_handles() != NULL) {
 806     for (int i = 0; i< metadata_handles()->length(); i++) {
 807       f(metadata_handles()->at(i));
 808     }
 809   }
 810 }
 811 
 812 void Thread::print_on(outputStream* st) const {
 813   // get_priority assumes osthread initialized
 814   if (osthread() != NULL) {
 815     int os_prio;
 816     if (os::get_native_priority(this, &os_prio) == OS_OK) {
 817       st->print("os_prio=%d ", os_prio);
 818     }
 819     st->print("tid=" INTPTR_FORMAT " ", p2i(this));
 820     ext().print_on(st);


3395   }
3396   // Someday we could have a table or list of all non-JavaThreads.
3397   // For now, just manually iterate through them.
3398   tc->do_thread(VMThread::vm_thread());
3399   Universe::heap()->gc_threads_do(tc);
3400   WatcherThread *wt = WatcherThread::watcher_thread();
3401   // Strictly speaking, the following NULL check isn't sufficient to make sure
3402   // the data for WatcherThread is still valid upon being examined. However,
3403   // considering that WatcherThread terminates when the VM is on the way to
3404   // exit at safepoint, the chance of the above is extremely small. The right
3405   // way to prevent termination of WatcherThread would be to acquire
3406   // Terminator_lock, but we can't do that without violating the lock rank
3407   // checking in some cases.
3408   if (wt != NULL) {
3409     tc->do_thread(wt);
3410   }
3411 
3412   // If CompilerThreads ever become non-JavaThreads, add them here
3413 }
3414 
3415 void Threads::parallel_java_threads_do(ThreadClosure* tc) {
3416   int cp = Threads::thread_claim_parity();
3417   ALL_JAVA_THREADS(p) {
3418     if (p->claim_oops_do(true, cp)) {
3419       tc->do_thread(p);
3420     }
3421   }
3422 }
3423 
3424 // The system initialization in the library has three phases.
3425 //
3426 // Phase 1: java.lang.System class initialization
3427 //     java.lang.System is a primordial class loaded and initialized
3428 //     by the VM early during startup.  java.lang.System.<clinit>
3429   //     only does registerNatives and defers the rest of the class
3430   //     initialization work until thread initialization completes.
3431 //
3432   //     System.initPhase1 initializes the system properties and the static
3433   //     fields in, out, and err. It sets up Java signal handlers, OS-specific
3434   //     system settings, and the thread group of the main thread.
3435 static void call_initPhase1(TRAPS) {
3436   Klass* k =  SystemDictionary::resolve_or_fail(vmSymbols::java_lang_System(), true, CHECK);
3437   instanceKlassHandle klass (THREAD, k);
3438 
3439   JavaValue result(T_VOID);
3440   JavaCalls::call_static(&result, klass, vmSymbols::initPhase1_name(),
3441                                          vmSymbols::void_method_signature(), CHECK);
3442 }
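For comparison, a rough out-of-VM analogue using plain JNI rather than the in-VM JavaCalls API shown above (the env pointer and method name are assumptions): resolve java.lang.System and invoke a static void no-arg method on it.

    #include <jni.h>

    // Assumes a valid JNIEnv* obtained from an already-running VM.
    static void call_static_void_noarg(JNIEnv* env, const char* method_name) {
      jclass system_klass = env->FindClass("java/lang/System");            // roughly resolve_or_fail
      if (system_klass == nullptr) return;                                 // exception pending; let the caller deal with it
      jmethodID mid = env->GetStaticMethodID(system_klass, method_name, "()V");
      if (mid == nullptr) return;                                          // no such method; exception pending
      env->CallStaticVoidMethod(system_klass, mid);                        // roughly JavaCalls::call_static above
    }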
3443 


4281   p->initialize_queues();
4282   p->set_next(_thread_list);
4283   _thread_list = p;
4284   _number_of_threads++;
4285   oop threadObj = p->threadObj();
4286   bool daemon = true;
4287   // Bootstrapping problem: threadObj can be null for initial
4288   // JavaThread (or for threads attached via JNI)
4289   if ((!force_daemon) && (threadObj == NULL || !java_lang_Thread::is_daemon(threadObj))) {
4290     _number_of_non_daemon_threads++;
4291     daemon = false;
4292   }
4293 
4294   ThreadService::add_thread(p, daemon);
4295 
4296   // Possible GC point.
4297   Events::log(p, "Thread added: " INTPTR_FORMAT, p2i(p));
4298 }
4299 
4300 void Threads::remove(JavaThread* p) {
4301 
4302   // Reclaim the objectmonitors from the omInUseList and omFreeList of the moribund thread.
4303   ObjectSynchronizer::omFlush(p);
4304 
4305   // Extra scope needed for Threads_lock, so we can check
4306   // that we do not remove a thread without the safepoint code noticing
4307   { MutexLocker ml(Threads_lock);
4308 
4309     assert(includes(p), "p must be present");
4310 
4311     JavaThread* current = _thread_list;
4312     JavaThread* prev    = NULL;
4313 
4314     while (current != p) {
4315       prev    = current;
4316       current = current->next();
4317     }
4318 
4319     if (prev) {
4320       prev->set_next(current->next());
4321     } else {
4322       _thread_list = p->next();
4323     }
4324     _number_of_threads--;


4375 void Threads::change_thread_claim_parity() {
4376   // Set the new claim parity.
4377   assert(_thread_claim_parity >= 0 && _thread_claim_parity <= 2,
4378          "Not in range.");
4379   _thread_claim_parity++;
4380   if (_thread_claim_parity == 3) _thread_claim_parity = 1;
4381   assert(_thread_claim_parity >= 1 && _thread_claim_parity <= 2,
4382          "Not in range.");
4383 }
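A tiny sketch (hypothetical helper, same increment-and-wrap arithmetic as above) showing why the claim parity cycles 1 -> 2 -> 1 and never returns to 0: a freshly created thread, whose per-thread parity starts at 0, is therefore always claimable in the current round.

    #include <cassert>

    // Same step as change_thread_claim_parity() above, as a pure function.
    static int next_claim_parity(int parity) {
      int p = parity + 1;
      if (p == 3) p = 1;
      return p;
    }

    int main() {
      int parity = 0;                         // global parity before the first change
      for (int round = 0; round < 6; ++round) {
        parity = next_claim_parity(parity);
        assert(parity == 1 || parity == 2);   // stays in {1, 2} after the first change
      }
      return 0;
    }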
4384 
4385 #ifdef ASSERT
4386 void Threads::assert_all_threads_claimed() {
4387   ALL_JAVA_THREADS(p) {
4388     const int thread_parity = p->oops_do_parity();
4389     assert((thread_parity == _thread_claim_parity),
4390            "Thread " PTR_FORMAT " has incorrect parity %d != %d", p2i(p), thread_parity, _thread_claim_parity);
4391   }
4392 }
4393 #endif // ASSERT
4394 
4395 void Threads::possibly_parallel_oops_do(bool is_par, OopClosure* f, CodeBlobClosure* cf, CodeBlobClosure* nmethods_cl) {
4396   int cp = Threads::thread_claim_parity();
4397   ALL_JAVA_THREADS(p) {
4398     if (p->claim_oops_do(is_par, cp)) {
4399       p->oops_do(f, cf);
4400       if (nmethods_cl != NULL && ! p->is_Code_cache_sweeper_thread()) {
4401         p->nmethods_do(nmethods_cl);
4402       }
4403     }
4404   }
4405   VMThread* vmt = VMThread::vm_thread();
4406   if (vmt->claim_oops_do(is_par, cp)) {
4407     vmt->oops_do(f, cf);
4408   }
4409 }
4410 
4411 #if INCLUDE_ALL_GCS
4412 // Used by ParallelScavenge
4413 void Threads::create_thread_roots_tasks(GCTaskQueue* q) {
4414   ALL_JAVA_THREADS(p) {
4415     q->enqueue(new ThreadRootsTask(p));
4416   }
4417   q->enqueue(new ThreadRootsTask(VMThread::vm_thread()));
4418 }
4419 
4420 // Used by Parallel Old
4421 void Threads::create_thread_roots_marking_tasks(GCTaskQueue* q) {
4422   ALL_JAVA_THREADS(p) {




 320   // set up any platform-specific state.
 321   os::initialize_thread(this);
 322 
 323   // Set stack limits after thread is initialized.
 324   if (is_Java_thread()) {
 325     ((JavaThread*) this)->set_stack_overflow_limit();
 326     ((JavaThread*) this)->set_reserved_stack_activation(stack_base());
 327   }
 328 #if INCLUDE_NMT
 329   // record thread's native stack, stack grows downward
 330   MemTracker::record_thread_stack(stack_end(), stack_size());
 331 #endif // INCLUDE_NMT
 332   log_debug(os, thread)("Thread " UINTX_FORMAT " stack dimensions: "
 333     PTR_FORMAT "-" PTR_FORMAT " (" SIZE_FORMAT "k).",
 334     os::current_thread_id(), p2i(stack_base() - stack_size()),
 335     p2i(stack_base()), stack_size()/1024);
 336 }
 337 
 338 
 339 Thread::~Thread() {
 340   // Reclaim the objectmonitors from the omFreeList of the moribund thread.
 341   ObjectSynchronizer::omFlush(this);
 342 
 343   EVENT_THREAD_DESTRUCT(this);
 344 
 345   // stack_base can be NULL if the thread is never started or exited before
 346   // record_stack_base_and_size is called. Although we would like to ensure
 347   // that all started threads do call record_stack_base_and_size(), there is
 348   // no proper way to enforce that.
 349 #if INCLUDE_NMT
 350   if (_stack_base != NULL) {
 351     MemTracker::release_thread_stack(stack_end(), stack_size());
 352 #ifdef ASSERT
 353     set_stack_base(NULL);
 354 #endif
 355   }
 356 #endif // INCLUDE_NMT
 357 
 358   // deallocate data structures
 359   delete resource_area();
 360   // Since the handle marks are using the handle area, we have to deallocate the root
 361   // handle mark before deallocating the thread's handle area.
 362   assert(last_handle_mark() != NULL, "check we have an element");


 776 // GC Support
 777 bool Thread::claim_oops_do_par_case(int strong_roots_parity) {
 778   jint thread_parity = _oops_do_parity;
 779   if (thread_parity != strong_roots_parity) {
 780     jint res = Atomic::cmpxchg(strong_roots_parity, &_oops_do_parity, thread_parity);
 781     if (res == thread_parity) {
 782       return true;
 783     } else {
 784       guarantee(res == strong_roots_parity, "Or else what?");
 785       return false;
 786     }
 787   }
 788   return false;
 789 }
 790 
 791 void Thread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
 792   active_handles()->oops_do(f);
 793   // Do oop for ThreadShadow
 794   f->do_oop((oop*)&_pending_exception);
 795   handle_area()->oops_do(f);








 796 }
 797 
 798 void Thread::metadata_handles_do(void f(Metadata*)) {
 799   // Only walk the Handles in Thread.
 800   if (metadata_handles() != NULL) {
 801     for (int i = 0; i< metadata_handles()->length(); i++) {
 802       f(metadata_handles()->at(i));
 803     }
 804   }
 805 }
 806 
 807 void Thread::print_on(outputStream* st) const {
 808   // get_priority assumes osthread initialized
 809   if (osthread() != NULL) {
 810     int os_prio;
 811     if (os::get_native_priority(this, &os_prio) == OS_OK) {
 812       st->print("os_prio=%d ", os_prio);
 813     }
 814     st->print("tid=" INTPTR_FORMAT " ", p2i(this));
 815     ext().print_on(st);


3390   }
3391   // Someday we could have a table or list of all non-JavaThreads.
3392   // For now, just manually iterate through them.
3393   tc->do_thread(VMThread::vm_thread());
3394   Universe::heap()->gc_threads_do(tc);
3395   WatcherThread *wt = WatcherThread::watcher_thread();
3396   // Strictly speaking, the following NULL check isn't sufficient to make sure
3397   // the data for WatcherThread is still valid upon being examined. However,
3398   // considering that WatcherThread terminates when the VM is on the way to
3399   // exit at safepoint, the chance of the above is extremely small. The right
3400   // way to prevent termination of WatcherThread would be to acquire
3401   // Terminator_lock, but we can't do that without violating the lock rank
3402   // checking in some cases.
3403   if (wt != NULL) {
3404     tc->do_thread(wt);
3405   }
3406 
3407   // If CompilerThreads ever become non-JavaThreads, add them here
3408 }
3409 









3410 // The system initialization in the library has three phases.
3411 //
3412 // Phase 1: java.lang.System class initialization
3413 //     java.lang.System is a primordial class loaded and initialized
3414 //     by the VM early during startup.  java.lang.System.<clinit>
3415   //     only does registerNatives and defers the rest of the class
3416   //     initialization work until thread initialization completes.
3417 //
3418   //     System.initPhase1 initializes the system properties and the static
3419   //     fields in, out, and err. It sets up Java signal handlers, OS-specific
3420   //     system settings, and the thread group of the main thread.
3421 static void call_initPhase1(TRAPS) {
3422   Klass* k =  SystemDictionary::resolve_or_fail(vmSymbols::java_lang_System(), true, CHECK);
3423   instanceKlassHandle klass (THREAD, k);
3424 
3425   JavaValue result(T_VOID);
3426   JavaCalls::call_static(&result, klass, vmSymbols::initPhase1_name(),
3427                                          vmSymbols::void_method_signature(), CHECK);
3428 }
3429 


4267   p->initialize_queues();
4268   p->set_next(_thread_list);
4269   _thread_list = p;
4270   _number_of_threads++;
4271   oop threadObj = p->threadObj();
4272   bool daemon = true;
4273   // Bootstrapping problem: threadObj can be null for initial
4274   // JavaThread (or for threads attached via JNI)
4275   if ((!force_daemon) && (threadObj == NULL || !java_lang_Thread::is_daemon(threadObj))) {
4276     _number_of_non_daemon_threads++;
4277     daemon = false;
4278   }
4279 
4280   ThreadService::add_thread(p, daemon);
4281 
4282   // Possible GC point.
4283   Events::log(p, "Thread added: " INTPTR_FORMAT, p2i(p));
4284 }
4285 
4286 void Threads::remove(JavaThread* p) {




4287   // Extra scope needed for Threads_lock, so we can check
4288   // that we do not remove a thread without the safepoint code noticing
4289   { MutexLocker ml(Threads_lock);
4290 
4291     assert(includes(p), "p must be present");
4292 
4293     JavaThread* current = _thread_list;
4294     JavaThread* prev    = NULL;
4295 
4296     while (current != p) {
4297       prev    = current;
4298       current = current->next();
4299     }
4300 
4301     if (prev) {
4302       prev->set_next(current->next());
4303     } else {
4304       _thread_list = p->next();
4305     }
4306     _number_of_threads--;


4357 void Threads::change_thread_claim_parity() {
4358   // Set the new claim parity.
4359   assert(_thread_claim_parity >= 0 && _thread_claim_parity <= 2,
4360          "Not in range.");
4361   _thread_claim_parity++;
4362   if (_thread_claim_parity == 3) _thread_claim_parity = 1;
4363   assert(_thread_claim_parity >= 1 && _thread_claim_parity <= 2,
4364          "Not in range.");
4365 }
4366 
4367 #ifdef ASSERT
4368 void Threads::assert_all_threads_claimed() {
4369   ALL_JAVA_THREADS(p) {
4370     const int thread_parity = p->oops_do_parity();
4371     assert((thread_parity == _thread_claim_parity),
4372            "Thread " PTR_FORMAT " has incorrect parity %d != %d", p2i(p), thread_parity, _thread_claim_parity);
4373   }
4374 }
4375 #endif // ASSERT
4376 
4377 void Threads::possibly_parallel_oops_do(bool is_par, OopClosure* f, CodeBlobClosure* cf) {
4378   int cp = Threads::thread_claim_parity();
4379   ALL_JAVA_THREADS(p) {
4380     if (p->claim_oops_do(is_par, cp)) {
4381       p->oops_do(f, cf);



4382     }
4383   }
4384   VMThread* vmt = VMThread::vm_thread();
4385   if (vmt->claim_oops_do(is_par, cp)) {
4386     vmt->oops_do(f, cf);
4387   }
4388 }
4389 
4390 #if INCLUDE_ALL_GCS
4391 // Used by ParallelScavenge
4392 void Threads::create_thread_roots_tasks(GCTaskQueue* q) {
4393   ALL_JAVA_THREADS(p) {
4394     q->enqueue(new ThreadRootsTask(p));
4395   }
4396   q->enqueue(new ThreadRootsTask(VMThread::vm_thread()));
4397 }
4398 
4399 // Used by Parallel Old
4400 void Threads::create_thread_roots_marking_tasks(GCTaskQueue* q) {
4401   ALL_JAVA_THREADS(p) {

