< prev index next >
src/share/vm/runtime/thread.cpp
Print this page
rev 8802 : G1 performance improvements: card batching, joining, sorting, prefetching and write barrier fence elision and simplification based on a global synchronization using handshakes piggybacking on thread-local safepoints.
rev 8803 : Implementation improvements to pass JPRT
rev 8806 : Handling some more JPRT complaints
rev 8808 : JPRT fix
*** 56,65 ****
--- 56,66 ----
#include "runtime/commandLineFlagRangeList.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/globals.hpp"
+ #include "runtime/globalSynchronizer.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jniPeriodicChecker.hpp"
*** 90,99 ****
--- 91,101 ----
#include "trace/traceMacros.hpp"
#include "trace/tracing.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
+ #include "utilities/hashtable.hpp"
#include "utilities/macros.hpp"
#include "utilities/preserveException.hpp"
#if INCLUDE_ALL_GCS
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
*** 207,216 ****
--- 209,220 ----
set_last_handle_mark(NULL);
// This initial value ==> never claimed.
_oops_do_parity = 0;
+ _java_threads_do_hp = NULL;
+
// the handle mark links itself to last_handle_mark
new HandleMark(this);
// plain initialization
debug_only(_owned_locks = NULL;)
*** 1392,1401 ****
--- 1396,1409 ----
// Initialize fields
// Set the claimed par_id to UINT_MAX (ie not claiming any par_ids)
set_claimed_par_id(UINT_MAX);
+ set_yieldpoint(false);
+ _serialized_memory_version = GlobalSynchronizer::global_serialized_memory_version();
+ _force_yield = false;
+
set_saved_exception_pc(NULL);
set_threadObj(NULL);
_anchor.clear();
set_entry_point(NULL);
set_jni_functions(jni_functions());
*** 1487,1496 ****
--- 1495,1513 ----
_jni_attach_state = _not_attaching_via_jni;
}
assert(deferred_card_mark().is_empty(), "Default MemRegion ctor");
}
// Bring this thread's cached serialized-memory version up to date with the
// global version published by GlobalSynchronizer. The load_acquire /
// release_store pair orders the version update with respect to the memory
// accesses that the version counter serializes.
void JavaThread::update_serialized_memory_version() {
  int global_version = GlobalSynchronizer::global_serialized_memory_version();
  int local_version = OrderAccess::load_acquire(&_serialized_memory_version);
  if (local_version != global_version) {
    // The local version can only lag the global counter, never lead it
    // (the counter is advanced monotonically on the global side).
    assert(local_version < global_version, "sanity");
    OrderAccess::release_store(&_serialized_memory_version, global_version);
  }
}
+
bool JavaThread::reguard_stack(address cur_sp) {
if (_stack_guard_state != stack_guard_yellow_disabled) {
return true; // Stack already guarded or guard pages not needed.
}
*** 1524,1533 ****
--- 1541,1557 ----
Threads_lock->lock_without_safepoint_check();
ShouldNotReachHere();
}
}
+ bool JavaThread::is_online_vm() {
+ return thread_state() == _thread_in_Java;
+ }
+
+ bool JavaThread::is_online_os() {
+ return _osthread->is_online();
+ }
// Remove this ifdef when C1 is ported to the compiler interface.
static void compiler_thread_entry(JavaThread* thread, TRAPS);
static void sweeper_thread_entry(JavaThread* thread, TRAPS);
*** 1662,1672 ****
}
DTRACE_THREAD_PROBE(stop, this);
this->exit(false);
! delete this;
}
static void ensure_join(JavaThread* thread) {
// We do not need to grab the Threads_lock, since we are operating on ourself.
--- 1686,1696 ----
}
DTRACE_THREAD_PROBE(stop, this);
this->exit(false);
! Threads::smr_free(this, false);
}
static void ensure_join(JavaThread* thread) {
// We do not need to grab the Threads_lock, since we are operating on ourself.
*** 1934,1944 ****
flush_barrier_queues();
}
#endif // INCLUDE_ALL_GCS
Threads::remove(this);
! delete this;
}
--- 1958,1968 ----
flush_barrier_queues();
}
#endif // INCLUDE_ALL_GCS
Threads::remove(this);
! Threads::smr_free(this, false);
}
*** 3198,3211 ****
--- 3222,3238 ----
// lock, which is also used in other contexts to protect thread
// operations from having the thread being operated on from exiting
// and going away unexpectedly (e.g., safepoint synchronization)
JavaThread* Threads::_thread_list = NULL;
+ JavaThread* Threads::_thread_smr_list = NULL;
+ JavaThread** Threads::_thread_smr_list_list = NULL;
int Threads::_number_of_threads = 0;
int Threads::_number_of_non_daemon_threads = 0;
int Threads::_return_code = 0;
int Threads::_thread_claim_parity = 0;
+ JavaThread **volatile Threads::_fast_java_thread_list = NULL;
size_t JavaThread::_stack_size_at_create = 0;
#ifdef ASSERT
bool Threads::_vm_complete = false;
#endif
*** 3236,3245 ****
--- 3263,3288 ----
}
// If CompilerThreads ever become non-JavaThreads, add them here
}
// Iterate 'tc' over all Java threads using the lock-free snapshot list
// (_fast_java_thread_list) instead of taking the Threads_lock. The caller's
// thread ('self') protects the snapshot with its hazard pointer for SMR.
void Threads::java_threads_do_fast(ThreadClosure *tc, Thread *self) {
  JavaThread **threads;

  // Stable load of thread list w.r.t. hazard pointer for SMR: publish the
  // candidate snapshot as our hazard pointer, then re-check that the global
  // list pointer has not been swapped concurrently. Once this loop exits,
  // reclaimers that honor _java_threads_do_hp will not free 'threads'.
  do {
    threads = (JavaThread**)OrderAccess::load_ptr_acquire((volatile void*)&_fast_java_thread_list);
    OrderAccess::release_store_ptr_fence((volatile void*)&self->_java_threads_do_hp, (void*)threads);
  } while ((JavaThread**)OrderAccess::load_ptr_acquire((volatile void*)&_fast_java_thread_list) != threads);
  if (threads == NULL) return;

  // The snapshot is a NULL-terminated array of JavaThread*.
  for (JavaThread **current = threads; *current != NULL; current++) {
    tc->do_thread(*current);
  }
  // Clear the hazard pointer so the snapshot becomes reclaimable again.
  OrderAccess::release_store_ptr_fence((volatile void*)&self->_java_threads_do_hp, NULL);
}
+
void Threads::initialize_java_lang_classes(JavaThread* main_thread, TRAPS) {
TraceTime timer("Initialize java.lang classes", TraceStartupTime);
if (EagerXrunInit && Arguments::init_libraries_at_startup()) {
create_vm_init_libraries();
*** 3395,3405 ****
main_thread->set_active_handles(JNIHandleBlock::allocate_block());
if (!main_thread->set_as_starting_thread()) {
vm_shutdown_during_initialization(
"Failed necessary internal allocation. Out of swap space");
! delete main_thread;
*canTryAgain = false; // don't let caller call JNI_CreateJavaVM again
return JNI_ENOMEM;
}
// Enable guard page *after* os::create_main_thread(), otherwise it would
--- 3438,3448 ----
main_thread->set_active_handles(JNIHandleBlock::allocate_block());
if (!main_thread->set_as_starting_thread()) {
vm_shutdown_during_initialization(
"Failed necessary internal allocation. Out of swap space");
! smr_free(main_thread, false);
*canTryAgain = false; // don't let caller call JNI_CreateJavaVM again
return JNI_ENOMEM;
}
// Enable guard page *after* os::create_main_thread(), otherwise it would
*** 3410,3420 ****
ObjectMonitor::Initialize();
// Initialize global modules
jint status = init_globals();
if (status != JNI_OK) {
! delete main_thread;
*canTryAgain = false; // don't let caller call JNI_CreateJavaVM again
return status;
}
// Should be done after the heap is fully created
--- 3453,3463 ----
ObjectMonitor::Initialize();
// Initialize global modules
jint status = init_globals();
if (status != JNI_OK) {
! smr_free(main_thread, false);
*canTryAgain = false; // don't let caller call JNI_CreateJavaVM again
return status;
}
// Should be done after the heap is fully created
*** 3960,3970 ****
VM_Exit::set_vm_exited();
notify_vm_shutdown();
! delete thread;
// exit_globals() will delete tty
exit_globals();
return true;
--- 4003,4013 ----
VM_Exit::set_vm_exited();
notify_vm_shutdown();
! smr_free(thread, true);
// exit_globals() will delete tty
exit_globals();
return true;
*** 3983,3992 ****
--- 4026,4221 ----
if (version == JNI_VERSION_1_6) return JNI_TRUE;
if (version == JNI_VERSION_1_8) return JNI_TRUE;
return JNI_FALSE;
}
// Hash table entry recording one pointer found to be protected by a
// hazard pointer (either a JavaThread* or a thread-list snapshot array).
class ThreadScanEntry: public BasicHashtableEntry<mtThread> {
public:
  void *_pointer;   // the protected pointer value (used as the key)

  ThreadScanEntry* next() {
    return (ThreadScanEntry*)BasicHashtableEntry<mtThread>::next();
  }

  const void* pointer() { return _pointer; }
  void set_pointer(void* pointer) { _pointer = pointer; }
};
+
// Minimal pointer-keyed hash table used during SMR reclamation to record
// which pointers are currently protected by some thread's hazard pointer.
class ThreadScanHashtable : public BasicHashtable<mtThread> {
private:
  // Multiplicative hash: the low 32 bits of the pointer times 2654435761
  // (Knuth's golden-ratio constant) to spread aligned pointer values.
  inline unsigned int compute_hash(void* pointer) {
    return (unsigned int)(((uint32_t)(uintptr_t)pointer) * 2654435761u);
  }

  ThreadScanEntry* bucket(int index) {
    return (ThreadScanEntry*)BasicHashtable<mtThread>::bucket(index);
  }

  // Linear scan of one bucket chain for an exact (hash, pointer) match.
  ThreadScanEntry* get_entry(int index, unsigned int hash, void *pointer) {
    for (ThreadScanEntry* pp = bucket(index); pp != NULL; pp = pp->next()) {
      if (pp->hash() == hash &&
          pp->pointer() == pointer) {
        return pp;
      }
    }
    return NULL;
  }

public:
  ThreadScanHashtable(int table_size)
    : BasicHashtable<mtThread>(table_size, sizeof(ThreadScanEntry)) {}

  // Returns the entry for 'pointer', or NULL if it was never added.
  ThreadScanEntry* get_entry(void *pointer) {
    unsigned int hash = compute_hash(pointer);
    return get_entry(hash_to_index(hash), hash, pointer);
  }

  // Allocates a new, not-yet-linked entry keyed by 'pointer'.
  ThreadScanEntry* new_entry(void *pointer) {
    unsigned int hash = compute_hash(pointer);
    ThreadScanEntry* pp;
    pp = (ThreadScanEntry*)BasicHashtable<mtThread>::new_entry(hash);
    pp->set_pointer(pointer);
    return pp;
  }

  // Links an entry produced by new_entry() into its bucket.
  void add_entry(ThreadScanEntry* pp) {
    int index = hash_to_index(pp->hash());
    BasicHashtable<mtThread>::add_entry(index, pp);
  }
};
+
+ class ScanHazardPointerThreadClosure: public ThreadClosure {
+ private:
+ ThreadScanHashtable *_table;
+ public:
+ ScanHazardPointerThreadClosure(ThreadScanHashtable *table) : _table(table) {}
+
+ virtual void do_thread(Thread *thread) {
+ assert_locked_or_safepoint(Threads_lock);
+ assert(thread->is_Java_thread(), "sanity");
+ JavaThread *const jthread = reinterpret_cast<JavaThread*>(thread);
+ JavaThread **threads = (JavaThread**)OrderAccess::load_ptr_acquire((volatile void*)&jthread->_java_threads_do_hp);
+ if (threads == NULL) return;
+ for (JavaThread** current = threads; *current != NULL; current++) {
+ JavaThread *p = *current;
+ if (_table->get_entry((void*)p) == NULL) {
+ _table->add_entry(_table->new_entry((void*)p));
+ }
+ }
+ }
+ };
+
+ void Threads::smr_free(JavaThread *thread, bool have_lock) {
+ assert(!have_lock || Threads_lock->is_locked(), "Threads_lock inconsistency");
+ JavaThread *delete_head;
+ if (!have_lock) {
+ MutexLocker ml(Threads_lock);
+ delete_head = smr_free_work(thread);
+ } else {
+ delete_head = smr_free_work(thread);
+ }
+
+ while (delete_head != NULL) {
+ JavaThread *next = delete_head->next();
+ delete delete_head;
+ delete_head = next;
+ }
+ }
+
// Pushes 'thread' onto the deferred-free list (_thread_smr_list), then
// partitions that list: threads still referenced by some hazard pointer
// stay queued; unprotected threads are unlinked and returned as a chain
// (linked through set_next) for the caller to delete.
// Caller must hold the Threads_lock.
JavaThread *Threads::smr_free_work(JavaThread *thread) {
  assert(Threads_lock->is_locked(), "Threads_lock should be locked");

  // Queue the newly-exited thread for deferred reclamation.
  thread->set_next(_thread_smr_list);
  _thread_smr_list = thread;

  JavaThread *current = _thread_smr_list;
  JavaThread *prev = NULL;
  JavaThread *next = NULL;
  JavaThread *delete_head = NULL;

  // Collect the set of JavaThread* currently protected by any thread's
  // hazard pointer.
  ThreadScanHashtable *scan_table = new ThreadScanHashtable(32);
  ScanHazardPointerThreadClosure scan_cl(scan_table);
  ALL_JAVA_THREADS(q) {
    scan_cl.do_thread(q);
  }

  // Unlink every queued thread that is not in the protected set and move
  // it onto the delete chain; protected threads remain queued.
  while (current != NULL) {
    next = current->next();
    if (!scan_table->get_entry((void*)current)) {
      if (prev != NULL) {
        prev->set_next(next);
      }
      if (_thread_smr_list == current) _thread_smr_list = next;

      current->set_next(delete_head);
      delete_head = current;
    } else {
      prev = current;
    }

    current = next;
  }

  delete scan_table;

  return delete_head;
}
+
+ class ScanHazardPointerThreadsClosure: public ThreadClosure {
+ private:
+ ThreadScanHashtable *_table;
+ public:
+ ScanHazardPointerThreadsClosure(ThreadScanHashtable *table) : _table(table) {}
+
+ virtual void do_thread(Thread *thread) {
+ assert_locked_or_safepoint(Threads_lock);
+ assert(thread->is_Java_thread(), "sanity");
+ JavaThread *const jthread = reinterpret_cast<JavaThread*>(thread);
+ JavaThread **threads = (JavaThread**)OrderAccess::load_ptr_acquire((volatile void*)&jthread->_java_threads_do_hp);
+ if (threads == NULL) return;
+ if (_table->get_entry((void*)threads) == NULL) {
+ _table->add_entry(_table->new_entry((void*)threads));
+ }
+ }
+ };
+
+ void Threads::smr_free_list(JavaThread **threads) {
+ assert(Threads_lock->is_locked(), "Threads_lock should be locked");
+
+ JavaThread ***threads_header_addr = (JavaThread***)(threads - 1);
+
+ *threads_header_addr = _thread_smr_list_list;
+ _thread_smr_list_list = threads;
+
+ JavaThread **current = _thread_smr_list_list;
+ JavaThread **prev = NULL;
+ JavaThread **next = NULL;
+
+ ThreadScanHashtable *scan_table = new ThreadScanHashtable(32);
+ ScanHazardPointerThreadsClosure scan_cl(scan_table);
+ ALL_JAVA_THREADS(q) {
+ scan_cl.do_thread(q);
+ }
+
+ while (current != NULL) {
+ JavaThread ***current_header_addr = (JavaThread***)(current - 1);
+ next = *current_header_addr;
+ if (!scan_table->get_entry((void*)current)) {
+ if (prev != NULL) {
+ JavaThread ***prev_header_addr = (JavaThread***)(prev - 1);
+ // prev->next = current->next
+ *prev_header_addr = *current_header_addr;
+ }
+ if (_thread_smr_list_list == current) _thread_smr_list_list = *current_header_addr;
+ FREE_C_HEAP_ARRAY(JavaThread*, (JavaThread**)current_header_addr);
+ } else {
+ prev = current;
+ }
+
+ current = next;
+ }
+ }
void Threads::add(JavaThread* p, bool force_daemon) {
// The threads lock must be owned at this point
assert_locked_or_safepoint(Threads_lock);
*** 4005,4014 ****
--- 4234,4252 ----
daemon = false;
}
ThreadService::add_thread(p, daemon);
+ JavaThread **new_thread_list = NEW_C_HEAP_ARRAY(JavaThread*, _number_of_threads + 2, mtThread) + 1;
+ int i = 0;
+ ALL_JAVA_THREADS(q) {
+ new_thread_list[i++] = q;
+ }
+ new_thread_list[i] = NULL;
+ JavaThread **old_list = (JavaThread**)Atomic::xchg_ptr((void*)new_thread_list, (volatile void*)&_fast_java_thread_list);
+ if (old_list != NULL) smr_free_list(old_list);
+
// Possible GC point.
Events::log(p, "Thread added: " INTPTR_FORMAT, p);
}
void Threads::remove(JavaThread* p) {
*** 4019,4038 ****
--- 4257,4290 ----
assert(includes(p), "p must be present");
JavaThread* current = _thread_list;
JavaThread* prev = NULL;
+ JavaThread **new_thread_list = NEW_C_HEAP_ARRAY(JavaThread*, _number_of_threads + 1, mtThread) + 1;
+ int i = 0;
+
while (current != p) {
+ new_thread_list[i++] = current;
prev = current;
current = current->next();
}
if (prev) {
prev->set_next(current->next());
} else {
_thread_list = p->next();
}
+
+ current = current->next();
+ while (current != NULL) {
+ new_thread_list[i++] = current;
+ current = current->next();
+ }
+ new_thread_list[i] = NULL;
+ JavaThread **old_list = (JavaThread**)Atomic::xchg_ptr((void*)new_thread_list, (volatile void*)&_fast_java_thread_list);
+ if (old_list != NULL) smr_free_list(old_list);
+
_number_of_threads--;
oop threadObj = p->threadObj();
bool daemon = true;
if (threadObj == NULL || !java_lang_Thread::is_daemon(threadObj)) {
_number_of_non_daemon_threads--;
< prev index next >