// Interrupt the given thread by delegating to the platform layer
// (os::interrupt). Static: typically called on behalf of a different
// thread than the current one.
void Thread::interrupt(Thread* thread) {
  trace("interrupt", thread);
  // Debug-build-only sanity check that 'thread' still refers to a live
  // Thread (name suggests a dangling-pointer check; compiled out in
  // product builds via debug_only).
  debug_only(check_for_dangling_thread_pointer(thread);)
  os::interrupt(thread);
}
737
// Return the interrupt state of 'thread', optionally clearing it.
// The heavy lifting (and the clear-on-read semantics) lives in the
// platform layer, os::is_interrupted.
bool Thread::is_interrupted(Thread* thread, bool clear_interrupted) {
  trace("is_interrupted", thread);
  // Debug-build-only check that 'thread' is still a valid pointer.
  debug_only(check_for_dangling_thread_pointer(thread);)
  // Note: If clear_interrupted==false, this simply fetches and
  // returns the value of the field osthread()->interrupted().
  return os::is_interrupted(thread, clear_interrupted);
}
745
746
// GC Support
// Claim this thread for root scanning during a parallel GC, using
// _oops_do_parity as a per-scan claim flag. Returns true iff the
// calling GC worker's CAS flips the parity to strong_roots_parity —
// that worker then owns this thread's oops_do work. Returns false if
// the parity was already current (thread claimed earlier in this scan)
// or another worker won the CAS race.
bool Thread::claim_oops_do_par_case(int strong_roots_parity) {
  jint thread_parity = _oops_do_parity;
  if (thread_parity != strong_roots_parity) {
    // Race among parallel GC workers: only one CAS from the stale
    // parity value can succeed.
    jint res = Atomic::cmpxchg(strong_roots_parity, &_oops_do_parity, thread_parity);
    if (res == thread_parity) return true;
    else {
      // CAS failed, so some other worker must already have installed
      // the new parity value.
      guarantee(res == strong_roots_parity, "Or else what?");
      assert(SharedHeap::heap()->n_par_threads() > 0,
             "Should only fail when parallel.");
      return false;
    }
  }
  // Parity already matches: this thread was claimed earlier in the scan.
  assert(SharedHeap::heap()->n_par_threads() > 0,
         "Should only fail when parallel.");
  return false;
}
764
// Apply closure f to the oop roots held directly by a generic thread:
// the active handle block, the pending-exception slot (from
// ThreadShadow), and the thread's HandleArea. 'cf' is unused here;
// presumably subclasses with compiled frames make use of it — confirm
// against JavaThread::oops_do.
void Thread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
  active_handles()->oops_do(f);
  // Do oop for ThreadShadow
  f->do_oop((oop*)&_pending_exception);
  handle_area()->oops_do(f);
}
771
772 void Thread::nmethods_do(CodeBlobClosure* cf) {
773 // no nmethods in a generic thread...
// Apply the closures to the roots of every Java thread, then to the
// VMThread. Serial (unclaimed) variant; possibly_parallel_oops_do is
// the work-splitting version.
void Threads::oops_do(OopClosure* f, CodeBlobClosure* cf) {
  ALL_JAVA_THREADS(p) {
    p->oops_do(f, cf);
  }
  VMThread::vm_thread()->oops_do(f, cf);
}
3894
3895 void Threads::possibly_parallel_oops_do(OopClosure* f, CodeBlobClosure* cf) {
3896 // Introduce a mechanism allowing parallel threads to claim threads as
3897 // root groups. Overhead should be small enough to use all the time,
3898 // even in sequential code.
3899 SharedHeap* sh = SharedHeap::heap();
3900 bool is_par = (sh->n_par_threads() > 0);
3901 int cp = SharedHeap::heap()->strong_roots_parity();
3902 ALL_JAVA_THREADS(p) {
3903 if (p->claim_oops_do(is_par, cp)) {
3904 p->oops_do(f, cf);
3905 }
3906 }
3907 VMThread* vmt = VMThread::vm_thread();
3908 if (vmt->claim_oops_do(is_par, cp))
3909 vmt->oops_do(f, cf);
3910 }
3911
3912 #ifndef SERIALGC
// Used by ParallelScavenge
// Enqueue one ThreadRootsTask per Java thread, plus one for the
// VMThread, so ParallelScavenge workers can scan thread roots
// concurrently.
void Threads::create_thread_roots_tasks(GCTaskQueue* q) {
  ALL_JAVA_THREADS(p) {
    q->enqueue(new ThreadRootsTask(p));
  }
  q->enqueue(new ThreadRootsTask(VMThread::vm_thread()));
}
3920
// Used by Parallel Old
// Enqueue one ThreadRootsMarkingTask per Java thread, plus one for the
// VMThread, so Parallel Old marking workers can process thread roots
// concurrently.
void Threads::create_thread_roots_marking_tasks(GCTaskQueue* q) {
  ALL_JAVA_THREADS(p) {
    q->enqueue(new ThreadRootsMarkingTask(p));
  }
  q->enqueue(new ThreadRootsMarkingTask(VMThread::vm_thread()));
}
3928 #endif // SERIALGC
3929
|
// Interrupt the given thread by delegating to the platform layer
// (os::interrupt). Static: typically called on behalf of a different
// thread than the current one.
void Thread::interrupt(Thread* thread) {
  trace("interrupt", thread);
  // Debug-build-only sanity check that 'thread' still refers to a live
  // Thread (name suggests a dangling-pointer check; compiled out in
  // product builds via debug_only).
  debug_only(check_for_dangling_thread_pointer(thread);)
  os::interrupt(thread);
}
737
// Return the interrupt state of 'thread', optionally clearing it.
// The heavy lifting (and the clear-on-read semantics) lives in the
// platform layer, os::is_interrupted.
bool Thread::is_interrupted(Thread* thread, bool clear_interrupted) {
  trace("is_interrupted", thread);
  // Debug-build-only check that 'thread' is still a valid pointer.
  debug_only(check_for_dangling_thread_pointer(thread);)
  // Note: If clear_interrupted==false, this simply fetches and
  // returns the value of the field osthread()->interrupted().
  return os::is_interrupted(thread, clear_interrupted);
}
745
746
// GC Support
// Claim this thread for root scanning during a parallel GC, using
// _oops_do_parity as a per-scan claim flag. Returns true iff the
// calling GC worker's CAS flips the parity to strong_roots_parity —
// that worker then owns this thread's oops_do work. Returns false if
// the parity was already current (thread claimed earlier in this scan)
// or another worker won the CAS race.
bool Thread::claim_oops_do_par_case(int strong_roots_parity) {
  jint thread_parity = _oops_do_parity;
  if (thread_parity != strong_roots_parity) {
    // Race among parallel GC workers: only one CAS from the stale
    // parity value can succeed.
    jint res = Atomic::cmpxchg(strong_roots_parity, &_oops_do_parity, thread_parity);
    if (res == thread_parity) {
      return true;
    } else {
      // CAS failed, so some other worker must already have installed
      // the new parity value.
      guarantee(res == strong_roots_parity, "Or else what?");
      assert(SharedHeap::heap()->n_par_threads() > 0,
             "Should only fail when parallel.");
      return false;
    }
  }
  // Parity already matches: this thread was claimed earlier in the scan.
  assert(SharedHeap::heap()->n_par_threads() > 0,
         "Should only fail when parallel.");
  return false;
}
765
// Apply closure f to the oop roots held directly by a generic thread:
// the active handle block, the pending-exception slot (from
// ThreadShadow), and the thread's HandleArea. 'cf' is unused here;
// presumably subclasses with compiled frames make use of it — confirm
// against JavaThread::oops_do.
void Thread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
  active_handles()->oops_do(f);
  // Do oop for ThreadShadow
  f->do_oop((oop*)&_pending_exception);
  handle_area()->oops_do(f);
}
772
773 void Thread::nmethods_do(CodeBlobClosure* cf) {
774 // no nmethods in a generic thread...
// Apply the closures to the roots of every Java thread, then to the
// VMThread. Serial (unclaimed) variant; possibly_parallel_oops_do is
// the work-splitting version.
void Threads::oops_do(OopClosure* f, CodeBlobClosure* cf) {
  ALL_JAVA_THREADS(p) {
    p->oops_do(f, cf);
  }
  VMThread::vm_thread()->oops_do(f, cf);
}
3895
3896 void Threads::possibly_parallel_oops_do(OopClosure* f, CodeBlobClosure* cf) {
3897 // Introduce a mechanism allowing parallel threads to claim threads as
3898 // root groups. Overhead should be small enough to use all the time,
3899 // even in sequential code.
3900 SharedHeap* sh = SharedHeap::heap();
3901 bool is_par = (sh->n_par_threads() > 0);
3902 int cp = SharedHeap::heap()->strong_roots_parity();
3903 ALL_JAVA_THREADS(p) {
3904 if (p->claim_oops_do(is_par, cp)) {
3905 p->oops_do(f, cf);
3906 }
3907 }
3908 VMThread* vmt = VMThread::vm_thread();
3909 if (vmt->claim_oops_do(is_par, cp)) {
3910 vmt->oops_do(f, cf);
3911 }
3912 }
3913
3914 #ifndef SERIALGC
// Used by ParallelScavenge
// Enqueue one ThreadRootsTask per Java thread, plus one for the
// VMThread, so ParallelScavenge workers can scan thread roots
// concurrently.
void Threads::create_thread_roots_tasks(GCTaskQueue* q) {
  ALL_JAVA_THREADS(p) {
    q->enqueue(new ThreadRootsTask(p));
  }
  q->enqueue(new ThreadRootsTask(VMThread::vm_thread()));
}
3922
// Used by Parallel Old
// Enqueue one ThreadRootsMarkingTask per Java thread, plus one for the
// VMThread, so Parallel Old marking workers can process thread roots
// concurrently.
void Threads::create_thread_roots_marking_tasks(GCTaskQueue* q) {
  ALL_JAVA_THREADS(p) {
    q->enqueue(new ThreadRootsMarkingTask(p));
  }
  q->enqueue(new ThreadRootsMarkingTask(VMThread::vm_thread()));
}
3930 #endif // SERIALGC
3931
|