# HG changeset patch
# User rehn
# Date 1528095516 -7200
#      Mon Jun 04 08:58:36 2018 +0200
# Node ID cd57d4d6515ef166e633aae233fa27bf336ae841
# Parent 7a013fbf6fc3a0e1aaef16f10de16bfa442c4058
8195097: Make it possible to process StringTable outside safepoint
Reviewed-by:

diff --git a/src/hotspot/share/classfile/stringTable.cpp b/src/hotspot/share/classfile/stringTable.cpp
--- a/src/hotspot/share/classfile/stringTable.cpp
+++ b/src/hotspot/share/classfile/stringTable.cpp
@@ -29,7 +29,10 @@
 #include "classfile/stringTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "gc/shared/collectedHeap.hpp"
+#include "gc/shared/oopStorage.inline.hpp"
+#include "gc/shared/oopStorageParState.inline.hpp"
 #include "logging/log.hpp"
+#include "logging/logStream.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/filemap.hpp"
 #include "memory/metaspaceShared.hpp"
@@ -38,170 +41,176 @@
 #include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/typeArrayOop.inline.hpp"
+#include "oops/weakHandle.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/safepointVerifiers.hpp"
+#include "runtime/timerTrace.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
 #include "services/diagnosticCommand.hpp"
-#include "utilities/hashtable.inline.hpp"
+#include "utilities/concurrentHashTable.inline.hpp"
+#include "utilities/concurrentHashTableTasks.inline.hpp"
 #include "utilities/macros.hpp"

-// the number of buckets a thread claims
-const int ClaimChunkSize = 32;
-
-#ifdef ASSERT
-class StableMemoryChecker : public StackObj {
-  enum { _bufsize = wordSize*4 };
-
-  address _region;
-  jint _size;
-  u1 _save_buf[_bufsize];
-
-  int sample(u1* save_buf) {
-    if (_size <= _bufsize) {
-      memcpy(save_buf, _region, _size);
-      return _size;
-    } else {
-      // copy head and tail
-      memcpy(&save_buf[0], _region, _bufsize/2);
-      memcpy(&save_buf[_bufsize/2], _region + _size - _bufsize/2, _bufsize/2);
-      return (_bufsize/2)*2;
-    }
-  }
-
- public:
-  StableMemoryChecker(const void* region, jint size) {
-    _region = (address) region;
-    _size = size;
-    sample(_save_buf);
-  }
-
-  bool verify() {
-    u1 check_buf[sizeof(_save_buf)];
-    int check_size = sample(check_buf);
-    return (0 == memcmp(_save_buf, check_buf, check_size));
-  }
-
-  void set_region(const void* region) { _region = (address) region; }
-};
-#endif
-
+// We prefer short chains of avg 2
+#define PREF_AVG_LIST_LEN 2
+// We start with the same old size, consider reducing this
+#define START_SIZE 16
+// 2^24 is max size
+#define END_SIZE 24
+// If a chain gets to 32 something might be wrong
+#define REHASH_LEN 32
+// If we have as many dead items as 50% of the number of buckets
+#define CLEAN_DEAD_HIGH_WATER_MARK 0.5
 // --------------------------------------------------------------------------
 StringTable* StringTable::_the_table = NULL;
 bool StringTable::_shared_string_mapped = false;
-bool StringTable::_needs_rehashing = false;
-
-volatile int StringTable::_parallel_claimed_idx = 0;
-
 CompactHashtable<oop, mtSymbol> StringTable::_shared_table;
+bool StringTable::_alt_hash = false;

-// Pick hashing algorithm
-unsigned int StringTable::hash_string(const jchar* s, int len) {
-  return use_alternate_hashcode() ?
alt_hash_string(s, len) : - java_lang_String::hash_code(s, len); -} - -unsigned int StringTable::alt_hash_string(const jchar* s, int len) { - return AltHashing::murmur3_32(seed(), s, len); -} +static juint murmur_seed = 0; -unsigned int StringTable::hash_string(oop string) { - EXCEPTION_MARK; - if (string == NULL) { - return hash_string((jchar*)NULL, 0); - } - ResourceMark rm(THREAD); - // All String oops are hashed as unicode - int length; - jchar* chars = java_lang_String::as_unicode_string(string, length, THREAD); - if (chars != NULL) { - return hash_string(chars, length); - } else { - vm_exit_out_of_memory(length, OOM_MALLOC_ERROR, "unable to create Unicode string for verification"); - return 0; - } -} - -oop StringTable::string_object(HashtableEntry* entry) { - return RootAccess::oop_load(entry->literal_addr()); -} - -oop StringTable::string_object_no_keepalive(HashtableEntry* entry) { - // The AS_NO_KEEPALIVE peeks at the oop without keeping it alive. - // This is *very dangerous* in general but is okay in this specific - // case. The subsequent oop_load keeps the oop alive if it it matched - // the jchar* string. - return RootAccess::oop_load(entry->literal_addr()); -} - -void StringTable::set_string_object(HashtableEntry* entry, oop string) { - RootAccess::oop_store(entry->literal_addr(), string); -} - -oop StringTable::lookup_shared(jchar* name, int len, unsigned int hash) { - assert(hash == java_lang_String::hash_code(name, len), - "hash must be computed using java_lang_String::hash_code"); - return _shared_table.lookup((const char*)name, hash, len); +uintx hash_string(const jchar* s, int len, bool useAlt) { + return useAlt ? + AltHashing::murmur3_32(murmur_seed, s, len) : + java_lang_String::hash_code(s, len); } -oop StringTable::lookup_in_main_table(int index, jchar* name, - int len, unsigned int hash) { - int count = 0; - for (HashtableEntry* l = bucket(index); l != NULL; l = l->next()) { - count++; - if (l->hash() == hash) { - if (java_lang_String::equals(string_object_no_keepalive(l), name, len)) { - // We must perform a new load with string_object() that keeps the string - // alive as we must expose the oop as strongly reachable when exiting - // this context, in case the oop gets published. - return string_object(l); - } +class StringTableConfig : public StringTableHash::BaseConfig { + private: + public: + static uintx get_hash(WeakHandle const& value, + bool* is_dead) { + EXCEPTION_MARK; + oop val_oop = value.peek(); + if (val_oop == NULL) { + *is_dead = true; + return 0; + } + *is_dead = false; + ResourceMark rm(THREAD); + // All String oops are hashed as unicode + int length; + jchar* chars = java_lang_String::as_unicode_string(val_oop, length, THREAD); + if (chars != NULL) { + return hash_string(chars, length, StringTable::_alt_hash); } + vm_exit_out_of_memory(length, OOM_MALLOC_ERROR, "get hash from oop"); + return 0; } - // If the bucket size is too deep check if this hash code is insufficient. 
- if (count >= rehash_count && !needs_rehashing()) { - _needs_rehashing = check_rehash_table(count); + // We use default allocation/deallocation but counted + static void* allocate_node(size_t size, + WeakHandle const& value) { + StringTable::item_added(); + return StringTableHash::BaseConfig::allocate_node(size, value); + } + static void free_node(void* memory, + WeakHandle const& value) { + value.release(); + StringTableHash::BaseConfig::free_node(memory, value); + StringTable::item_removed(); + } +}; + +class StringTableLookupJchar { + private: + Thread* _thread; + uintx _hash; + int _len; + const jchar* _str; + Handle _found; + + public: + StringTableLookupJchar(Thread* thread, uintx hash, const jchar* key, int len) + : _thread(thread), _hash(hash), _str(key), _len(len) { + } + uintx get_hash() const { + return _hash; } - return NULL; + bool equals(WeakHandle* value, bool* is_dead) { + oop val_oop = value->peek(); + if (val_oop == NULL) { + // dead oop, mark this hash dead for cleaning + *is_dead = true; + return false; + } + bool equals = java_lang_String::equals(val_oop, (jchar*)_str, _len); + if (!equals) { + return false; + } + // Need to resolve weak handle and Handleize through possible safepoint. + _found = Handle(_thread, value->resolve()); + return true; + } +}; + +class StringTableLookupOop : public StackObj { + private: + Thread* _thread; + uintx _hash; + Handle _find; + Handle _found; // Might be a different oop with the same value that's already + // in the table, which is the point. + public: + StringTableLookupOop(Thread* thread, uintx hash, Handle handle) + : _thread(thread), _hash(hash), _find(handle) { } + + uintx get_hash() const { + return _hash; + } + + bool equals(WeakHandle* value, bool* is_dead) { + oop val_oop = value->peek(); + if (val_oop == NULL) { + // dead oop, mark this hash dead for cleaning + *is_dead = true; + return false; + } + bool equals = java_lang_String::equals(_find(), val_oop); + if (!equals) { + return false; + } + // Need to resolve weak handle and Handleize through possible safepoint. + _found = Handle(_thread, value->resolve()); + return true; + } +}; + +StringTable::StringTable() : _local_table(NULL), _current_size(0), _has_work(0), + _needs_rehashing(false), _weak_handles(NULL), _items(0), _uncleaned_items(0) { + _weak_handles = new OopStorage("StringTable weak", + StringTableWeakAlloc_lock, + StringTableWeakActive_lock); + _local_table = new StringTableHash(START_SIZE, END_SIZE, REHASH_LEN); + _current_size = ((size_t)1) << START_SIZE; } - -oop StringTable::basic_add(int index_arg, Handle string, jchar* name, - int len, unsigned int hashValue_arg, TRAPS) { - - assert(java_lang_String::equals(string(), name, len), - "string must be properly initialized"); - // Cannot hit a safepoint in this function because the "this" pointer can move. - NoSafepointVerifier nsv; +size_t StringTable::item_added() { + return Atomic::add((size_t)1, &(the_table()->_items)); +} - // Check if the symbol table has been rehashed, if so, need to recalculate - // the hash value and index before second lookup. - unsigned int hashValue; - int index; - if (use_alternate_hashcode()) { - hashValue = alt_hash_string(name, len); - index = hash_to_index(hashValue); - } else { - hashValue = hashValue_arg; - index = index_arg; - } - - // Since look-up was done lock-free, we need to check if another - // thread beat us in the race to insert the symbol. 
- - // No need to lookup the shared table from here since the caller (intern()) already did - oop test = lookup_in_main_table(index, name, len, hashValue); // calls lookup(u1*, int) - if (test != NULL) { - // Entry already added - return test; - } - - HashtableEntry* entry = new_entry(hashValue, string()); - add_entry(index, entry); - return string(); +size_t StringTable::items_to_clean(size_t ncl) { + size_t total = Atomic::add((size_t)ncl, &(the_table()->_uncleaned_items)); + log_trace(stringtable)( + "Uncleaned items:" SIZE_FORMAT " added: " SIZE_FORMAT " total:" SIZE_FORMAT, + the_table()->_uncleaned_items, ncl, total); + return total; } +void StringTable::item_removed() { + Atomic::add((size_t)-1, &(the_table()->_items)); + Atomic::add((size_t)-1, &(the_table()->_uncleaned_items)); +} + +double StringTable::get_load_factor() { + return (_items*1.0)/_current_size; +} + +double StringTable::get_dead_factor() { + return (_uncleaned_items*1.0)/_current_size; +} oop StringTable::lookup(Symbol* symbol) { ResourceMark rm; @@ -211,69 +220,110 @@ } oop StringTable::lookup(jchar* name, int len) { - // shared table always uses java_lang_String::hash_code unsigned int hash = java_lang_String::hash_code(name, len); - oop string = lookup_shared(name, len, hash); + oop string = StringTable::the_table()->lookup_shared(name, len, hash); if (string != NULL) { return string; } - if (use_alternate_hashcode()) { - hash = alt_hash_string(name, len); + if (StringTable::_alt_hash) { + hash = hash_string(name, len, true); } - int index = the_table()->hash_to_index(hash); - string = the_table()->lookup_in_main_table(index, name, len, hash); - - return string; + return StringTable::the_table()->do_lookup( name, len, hash); } -oop StringTable::intern(Handle string_or_null, jchar* name, - int len, TRAPS) { +class StringTableGet : public StackObj { + Thread* _thread; + Handle _return; + public: + StringTableGet(Thread* thread) : _thread(thread) {} + void operator()(WeakHandle* val) { + oop result = val->resolve(); + assert(result != NULL, "Result should be reachable"); + _return = Handle(_thread, result); + } + oop get_res_oop() { + return _return(); + } +}; + +oop StringTable::do_lookup(jchar* name, int len, uintx hash) { + Thread* thread = Thread::current(); + StringTableLookupJchar lookup(thread, hash, name, len); + StringTableGet stg(thread); + bool rehash_warning; + _local_table->get(thread, lookup, stg, &rehash_warning); + if (rehash_warning) { + _needs_rehashing = true; + } + return stg.get_res_oop(); +} + +class StringTableCreateEntry : public StackObj { + private: + Thread* _thread; + Handle _return; + Handle _store; + public: + StringTableCreateEntry(Thread* thread, Handle store) + : _thread(thread), _store(store) {} + + WeakHandle operator()() { // No dups found + WeakHandle wh = + WeakHandle::create(_store); + return wh; + } + void operator()(bool inserted, WeakHandle* val) { + oop result = val->resolve(); + assert(result != NULL, "Result should be reachable"); + _return = Handle(_thread, result); + } + oop get_return() const { + return _return(); + } +}; + +oop StringTable::intern(Handle string_or_null_h, jchar* name, int len, TRAPS) { // shared table always uses java_lang_String::hash_code - unsigned int hashValue = java_lang_String::hash_code(name, len); - oop found_string = lookup_shared(name, len, hashValue); + unsigned int hash = java_lang_String::hash_code(name, len); + oop found_string = StringTable::the_table()->lookup_shared(name, len, hash); if (found_string != NULL) { return found_string; } 
- if (use_alternate_hashcode()) { - hashValue = alt_hash_string(name, len); + if (StringTable::_alt_hash) { + hash = hash_string(name, len, true); } - int index = the_table()->hash_to_index(hashValue); - found_string = the_table()->lookup_in_main_table(index, name, len, hashValue); - - // Found - if (found_string != NULL) { - return found_string; - } + return StringTable::the_table()->do_intern(string_or_null_h, name, len, + hash, CHECK_NULL); +} - debug_only(StableMemoryChecker smc(name, len * sizeof(name[0]))); - assert(!Universe::heap()->is_in_reserved(name), - "proposed name of symbol must be stable"); - +oop StringTable::do_intern(Handle string_or_null_h, jchar* name, + int len, uintx hash, TRAPS) { HandleMark hm(THREAD); // cleanup strings created - Handle string; - // try to reuse the string if possible - if (!string_or_null.is_null()) { - string = string_or_null; + Handle string_h; + + if (!string_or_null_h.is_null()) { + string_h = string_or_null_h; } else { - string = java_lang_String::create_from_unicode(name, len, CHECK_NULL); + string_h = java_lang_String::create_from_unicode(name, len, CHECK_NULL); } // Deduplicate the string before it is interned. Note that we should never // deduplicate a string after it has been interned. Doing so will counteract // compiler optimizations done on e.g. interned string literals. - Universe::heap()->deduplicate_string(string()); + Universe::heap()->deduplicate_string(string_h()); - // Grab the StringTable_lock before getting the_table() because it could - // change at safepoint. - oop added_or_found; - { - MutexLocker ml(StringTable_lock, THREAD); - // Otherwise, add to symbol to table - added_or_found = the_table()->basic_add(index, string, name, len, - hashValue, CHECK_NULL); + assert(java_lang_String::equals(string_h(), name, len), + "string must be properly initialized"); + assert(len == java_lang_String::length(string_h()), "Must be same length"); + StringTableLookupOop lookup(THREAD, hash, string_h); + StringTableCreateEntry stc(THREAD, string_h); + + bool rehash_warning; + _local_table->get_insert_lazy(THREAD, lookup, stc, stc, &rehash_warning); + if (rehash_warning) { + _needs_rehashing = true; } - - return added_or_found; + return stc.get_return(); } oop StringTable::intern(Symbol* symbol, TRAPS) { @@ -286,19 +336,17 @@ return result; } - -oop StringTable::intern(oop string, TRAPS) -{ +oop StringTable::intern(oop string, TRAPS) { if (string == NULL) return NULL; ResourceMark rm(THREAD); int length; Handle h_string (THREAD, string); - jchar* chars = java_lang_String::as_unicode_string(string, length, CHECK_NULL); + jchar* chars = java_lang_String::as_unicode_string(string, length, + CHECK_NULL); oop result = intern(h_string, chars, length, CHECK_NULL); return result; } - oop StringTable::intern(const char* utf8_string, TRAPS) { if (utf8_string == NULL) return NULL; ResourceMark rm(THREAD); @@ -310,340 +358,367 @@ return result; } -void StringTable::unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int* processed, int* removed) { - BucketUnlinkContext context; - buckets_unlink_or_oops_do(is_alive, f, 0, the_table()->table_size(), &context); - _the_table->bulk_free_entries(&context); - *processed = context._num_processed; - *removed = context._num_removed; -} - -void StringTable::possibly_parallel_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int* processed, int* removed) { - // Readers of the table are unlocked, so we should only be removing - // entries at a safepoint. 
- assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); - const int limit = the_table()->table_size(); - - BucketUnlinkContext context; - for (;;) { - // Grab next set of buckets to scan - int start_idx = Atomic::add(ClaimChunkSize, &_parallel_claimed_idx) - ClaimChunkSize; - if (start_idx >= limit) { - // End of table - break; - } - - int end_idx = MIN2(limit, start_idx + ClaimChunkSize); - buckets_unlink_or_oops_do(is_alive, f, start_idx, end_idx, &context); - } - _the_table->bulk_free_entries(&context); - *processed = context._num_processed; - *removed = context._num_removed; +size_t StringTable::table_size(Thread* thread) { + return ((size_t)(1)) << _local_table->get_size_log2(thread != NULL ? thread + : Thread::current()); } -void StringTable::buckets_oops_do(OopClosure* f, int start_idx, int end_idx) { - const int limit = the_table()->table_size(); - - assert(0 <= start_idx && start_idx <= limit, - "start_idx (%d) is out of bounds", start_idx); - assert(0 <= end_idx && end_idx <= limit, - "end_idx (%d) is out of bounds", end_idx); - assert(start_idx <= end_idx, - "Index ordering: start_idx=%d, end_idx=%d", - start_idx, end_idx); +class StringTableIsAliveCounter : public BoolObjectClosure { + BoolObjectClosure* _real_boc; + public: + size_t _count; + size_t _count_total; + StringTableIsAliveCounter(BoolObjectClosure* boc) : _real_boc(boc), _count(0), + _count_total(0) {} + bool do_object_b(oop obj) { + bool ret = _real_boc->do_object_b(obj); + if (!ret) { + ++_count; + } + ++_count_total; + return ret; + } +}; - for (int i = start_idx; i < end_idx; i += 1) { - HashtableEntry* entry = the_table()->bucket(i); - while (entry != NULL) { - assert(!entry->is_shared(), "CDS not used for the StringTable"); - - f->do_oop((oop*)entry->literal_addr()); - - entry = entry->next(); - } - } +void StringTable::trigger_concurrent_work() { + MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag); + the_table()->_has_work = true; + Service_lock->notify_all(); } -void StringTable::buckets_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int start_idx, int end_idx, BucketUnlinkContext* context) { - const int limit = the_table()->table_size(); - - assert(0 <= start_idx && start_idx <= limit, - "start_idx (%d) is out of bounds", start_idx); - assert(0 <= end_idx && end_idx <= limit, - "end_idx (%d) is out of bounds", end_idx); - assert(start_idx <= end_idx, - "Index ordering: start_idx=%d, end_idx=%d", - start_idx, end_idx); +void StringTable::unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, + int* processed, int* removed) { + DoNothingClosure dnc; + assert(is_alive != NULL, "No closure"); + StringTableIsAliveCounter stiac(is_alive); + OopClosure* tmp = f != NULL ? 
f : &dnc; - for (int i = start_idx; i < end_idx; ++i) { - HashtableEntry** p = the_table()->bucket_addr(i); - HashtableEntry* entry = the_table()->bucket(i); - while (entry != NULL) { - assert(!entry->is_shared(), "CDS not used for the StringTable"); + StringTable::the_table()->_weak_handles->weak_oops_do(&stiac, tmp); - if (is_alive->do_object_b(string_object_no_keepalive(entry))) { - if (f != NULL) { - f->do_oop(entry->literal_addr()); - } - p = entry->next_addr(); - } else { - *p = entry->next(); - context->free_entry(entry); - } - context->_num_processed++; - entry = *p; - } + StringTable::the_table()->items_to_clean(stiac._count); + StringTable::the_table()->check_concurrent_work(); + if (processed != NULL) { + *processed = (int) stiac._count_total; + } + if (removed != NULL) { + *removed = (int) stiac._count; } } void StringTable::oops_do(OopClosure* f) { - buckets_oops_do(f, 0, the_table()->table_size()); + assert(f != NULL, "No closure"); + StringTable::the_table()->_weak_handles->oops_do(f); +} + +void StringTable::possibly_parallel_unlink( + OopStorage::ParState* _par_state_string, BoolObjectClosure* cl, + int* processed, int* removed) +{ + DoNothingClosure dnc; + assert(cl != NULL, "No closure"); + StringTableIsAliveCounter stiac(cl); + + _par_state_string->weak_oops_do(&stiac, &dnc); + + StringTable::the_table()->items_to_clean(stiac._count); + StringTable::the_table()->check_concurrent_work(); + *processed = (int) stiac._count_total; + *removed = (int) stiac._count; +} + +void StringTable::possibly_parallel_oops_do( + OopStorage::ParState* + _par_state_string, OopClosure* f) +{ + assert(f != NULL, "No closure"); + _par_state_string->oops_do(f); +} + +void StringTable::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) { + assert(is_alive != NULL, "No closure"); + StringTableIsAliveCounter stiac(is_alive); + weak_storage()->weak_oops_do(is_alive, f); + StringTable::the_table()->items_to_clean(stiac._count); + StringTable::the_table()->check_concurrent_work(); +} + +void StringTable::weak_oops_do(OopClosure* f) { + assert(f != NULL, "No closure"); + weak_storage()->weak_oops_do(f); } -void StringTable::possibly_parallel_oops_do(OopClosure* f) { - const int limit = the_table()->table_size(); +struct StringTableDeleteCheck { + long _count; + long _item; + StringTableDeleteCheck() : _count(0), _item(0) {} + bool operator()(WeakHandle* val) { + ++_item; + oop tmp = val->peek(); + if (tmp == NULL) { + ++_count; + return true; + } else { + return false; + } + } +}; + +struct StringTableDoDelete { + long _count; + StringTableDoDelete() : _count(0) {} + void operator()(WeakHandle* val) { + ++_count; + } +}; + +void StringTable::grow(JavaThread* jt) { + StringTableHash::GrowTask gt(_local_table); + if (!gt.prepare(jt)) { + return; + } + log_trace(stringtable)("Started to growed"); + { + TraceTime timer("Grow", TRACETIME_LOG(Debug, stringtable, perf)); + while(gt.doTask(jt)) { + gt.pause(jt); + { + ThreadBlockInVM tbivm(jt); + } + gt.cont(jt); + } + } + gt.done(jt); + _current_size = table_size(jt); + log_debug(stringtable)("Growed to size:" SIZE_FORMAT, _current_size); +} - for (;;) { - // Grab next set of buckets to scan - int start_idx = Atomic::add(ClaimChunkSize, &_parallel_claimed_idx) - ClaimChunkSize; - if (start_idx >= limit) { - // End of table - break; +void StringTable::clean_dead_entries(JavaThread* jt) { + StringTableHash::BulkDeleteTask bdt(_local_table); + if (!bdt.prepare(jt)) { + return; + } + + StringTableDeleteCheck stdc; + StringTableDoDelete stdd; + bool 
interrupted = false; + { + TraceTime timer("Clean", TRACETIME_LOG(Debug, stringtable, perf)); + while(bdt.doTask(jt, stdc, stdd)) { + bdt.pause(jt); + { + ThreadBlockInVM tbivm(jt); + } + if (!bdt.cont(jt)) { + interrupted = true; + break; + } } + } + if (interrupted) { + _has_work = true; + } else { + bdt.done(jt); + } + log_debug(stringtable)("Cleaned %ld of %ld", stdc._count, stdc._item); +} - int end_idx = MIN2(limit, start_idx + ClaimChunkSize); - buckets_oops_do(f, start_idx, end_idx); +void StringTable::check_concurrent_work() { + if (_has_work) { + return; + } + double fact = StringTable::get_load_factor(); + double dead_fact = StringTable::get_dead_factor(); + // We should clean/resize if we have more dead than alive, + // more items than preferred load factor or + // more dead items than water mark. + if ((dead_fact > fact) || + (fact > PREF_AVG_LIST_LEN) || + (dead_fact > CLEAN_DEAD_HIGH_WATER_MARK)) { + log_debug(stringtable)("Concurrent work triggered, live factor:%g dead factor:%g", + fact, dead_fact); + trigger_concurrent_work(); } } -// This verification is part of Universe::verify() and needs to be quick. -// See StringTable::verify_and_compare() below for exhaustive verification. -void StringTable::verify() { - for (int i = 0; i < the_table()->table_size(); ++i) { - HashtableEntry* p = the_table()->bucket(i); - for ( ; p != NULL; p = p->next()) { - oop s = string_object_no_keepalive(p); - guarantee(s != NULL, "interned string is NULL"); - unsigned int h = hash_string(s); - guarantee(p->hash() == h, "broken hash in string table entry"); - guarantee(the_table()->hash_to_index(h) == i, - "wrong index in string table"); - } +void StringTable::concurrent_work(JavaThread* jt) { + _has_work = false; + double fact = get_load_factor(); + log_debug(stringtable, perf)("Concurrent work, live factor: %g", fact); + // We prefer growing, since that also removes dead items + if (fact > PREF_AVG_LIST_LEN && !_local_table->is_max_size_reached()) { + grow(jt); + } else { + clean_dead_entries(jt); } } +void StringTable::do_concurrent_work(JavaThread* jt) { + StringTable::the_table()->concurrent_work(jt); +} + +bool StringTable::do_rehash() { + if (!_local_table->is_safepoint_safe()) { + return false; + } + + // We use max size + StringTableHash* new_table = new StringTableHash(END_SIZE, END_SIZE, REHASH_LEN); + // Use alt hash from now on + _alt_hash = true; + if (!_local_table->try_move_nodes_to(Thread::current(), new_table)) { + _alt_hash = false; + delete new_table; + return false; + } + + // free old table + delete _local_table; + _local_table = new_table; + + return true; +} + +void StringTable::try_rehash_table() { + static bool rehashed = false; + log_debug(stringtable)("Table imbalanced, rehashing called."); + + // Grow instead of rehash. + if (get_load_factor() > PREF_AVG_LIST_LEN && + !_local_table->is_max_size_reached()) { + log_debug(stringtable)("Choosing growing over rehashing."); + trigger_concurrent_work(); + _needs_rehashing = false; + return; + } + // Already rehashed. 
+ if (rehashed) { + log_warning(stringtable)("Rehashing already done, still long lists."); + trigger_concurrent_work(); + _needs_rehashing = false; + return; + } + + murmur_seed = AltHashing::compute_seed(); + { + if (do_rehash()) { + rehashed = true; + } else { + log_info(stringtable)("Resizes in progress rehashing skipped."); + } + } + _needs_rehashing = false; +} + +void StringTable::rehash_table() { + StringTable::the_table()->try_rehash_table(); +} + +oop StringTable::lookup_shared(jchar* name, int len, unsigned int hash) { + assert(hash == java_lang_String::hash_code(name, len), + "hash must be computed using java_lang_String::hash_code"); + return _shared_table.lookup((const char*)name, hash, len); +} + +static int literal_size(oop obj) { + // NOTE: this would over-count if (pre-JDK8) + // java_lang_Class::has_offset_field() is true and the String.value array is + // shared by several Strings. However, starting from JDK8, the String.value + // array is not shared anymore. + if (obj == NULL) { + return 0; + } else if (obj->klass() == SystemDictionary::String_klass()) { + return (obj->size() + java_lang_String::value(obj)->size()) * HeapWordSize; + } else { + return obj->size(); + } +} + +struct SizeFunc { + size_t operator()(WeakHandle* val) { + oop s = val->peek(); + if (s == NULL) { + // Dead + return 0; + } + return literal_size(s); + }; +}; + +void StringTable::print_table_statistics(outputStream* st, + const char* table_name) { + SizeFunc sz; + _local_table->statistics_to(Thread::current(), sz, st, table_name); +} + +class PrintString { + Thread* _thr; + outputStream* _st; + public: + PrintString(Thread* thr, outputStream* st) : _thr(thr), _st(st) {} + bool operator()(WeakHandle* val) { + oop s = val->peek(); + if (s == NULL) { + return true; + } + typeArrayOop value = java_lang_String::value_no_keepalive(s); + int length = java_lang_String::length(s); + bool is_latin1 = java_lang_String::is_latin1(s); + + if (length <= 0) { + _st->print("%d: ", length); + } else { + ResourceMark rm(_thr); + int utf8_length = length; + char* utf8_string; + + if (!is_latin1) { + jchar* chars = value->char_at_addr(0); + utf8_string = UNICODE::as_utf8(chars, utf8_length); + } else { + jbyte* bytes = value->byte_at_addr(0); + utf8_string = UNICODE::as_utf8(bytes, utf8_length); + } + + _st->print("%d: ", utf8_length); + HashtableTextDump::put_utf8(_st, utf8_string, utf8_length); + } + _st->cr(); + return true; + }; +}; + void StringTable::dump(outputStream* st, bool verbose) { if (!verbose) { - the_table()->print_table_statistics(st, "StringTable", string_object_no_keepalive); + the_table()->print_table_statistics(st, "StringTable"); } else { - Thread* THREAD = Thread::current(); + Thread* thr = Thread::current(); + ResourceMark rm(thr); st->print_cr("VERSION: 1.1"); - for (int i = 0; i < the_table()->table_size(); ++i) { - HashtableEntry* p = the_table()->bucket(i); - for ( ; p != NULL; p = p->next()) { - oop s = string_object_no_keepalive(p); - typeArrayOop value = java_lang_String::value_no_keepalive(s); - int length = java_lang_String::length(s); - bool is_latin1 = java_lang_String::is_latin1(s); - - if (length <= 0) { - st->print("%d: ", length); - } else { - ResourceMark rm(THREAD); - int utf8_length = length; - char* utf8_string; - - if (!is_latin1) { - jchar* chars = value->char_at_addr(0); - utf8_string = UNICODE::as_utf8(chars, utf8_length); - } else { - jbyte* bytes = value->byte_at_addr(0); - utf8_string = UNICODE::as_utf8(bytes, utf8_length); - } - - st->print("%d: ", utf8_length); - 
HashtableTextDump::put_utf8(st, utf8_string, utf8_length); - } - st->cr(); - } + PrintString ps(thr, st); + if (!the_table()->_local_table->try_scan(thr, ps)) { + st->print_cr("dump unavailable at this moment"); } } } -StringTable::VerifyRetTypes StringTable::compare_entries( - int bkt1, int e_cnt1, - HashtableEntry* e_ptr1, - int bkt2, int e_cnt2, - HashtableEntry* e_ptr2) { - // These entries are sanity checked by verify_and_compare_entries() - // before this function is called. - oop str1 = string_object_no_keepalive(e_ptr1); - oop str2 = string_object_no_keepalive(e_ptr2); - - if (str1 == str2) { - tty->print_cr("ERROR: identical oop values (0x" PTR_FORMAT ") " - "in entry @ bucket[%d][%d] and entry @ bucket[%d][%d]", - p2i(str1), bkt1, e_cnt1, bkt2, e_cnt2); - return _verify_fail_continue; - } - - if (java_lang_String::equals(str1, str2)) { - tty->print_cr("ERROR: identical String values in entry @ " - "bucket[%d][%d] and entry @ bucket[%d][%d]", - bkt1, e_cnt1, bkt2, e_cnt2); - return _verify_fail_continue; - } - - return _verify_pass; -} - -StringTable::VerifyRetTypes StringTable::verify_entry(int bkt, int e_cnt, - HashtableEntry* e_ptr, - StringTable::VerifyMesgModes mesg_mode) { - - VerifyRetTypes ret = _verify_pass; // be optimistic - - oop str = string_object_no_keepalive(e_ptr); - if (str == NULL) { - if (mesg_mode == _verify_with_mesgs) { - tty->print_cr("ERROR: NULL oop value in entry @ bucket[%d][%d]", bkt, - e_cnt); +class VerifyStrings { + public: + bool operator()(WeakHandle* val) { + oop s = val->peek(); + if (s != NULL) { + assert(java_lang_String::length(s) >= 0, "Length on string must work."); } - // NULL oop means no more verifications are possible - return _verify_fail_done; - } - - if (str->klass() != SystemDictionary::String_klass()) { - if (mesg_mode == _verify_with_mesgs) { - tty->print_cr("ERROR: oop is not a String in entry @ bucket[%d][%d]", - bkt, e_cnt); - } - // not a String means no more verifications are possible - return _verify_fail_done; - } - - unsigned int h = hash_string(str); - if (e_ptr->hash() != h) { - if (mesg_mode == _verify_with_mesgs) { - tty->print_cr("ERROR: broken hash value in entry @ bucket[%d][%d], " - "bkt_hash=%d, str_hash=%d", bkt, e_cnt, e_ptr->hash(), h); - } - ret = _verify_fail_continue; - } - - if (the_table()->hash_to_index(h) != bkt) { - if (mesg_mode == _verify_with_mesgs) { - tty->print_cr("ERROR: wrong index value for entry @ bucket[%d][%d], " - "str_hash=%d, hash_to_index=%d", bkt, e_cnt, h, - the_table()->hash_to_index(h)); - } - ret = _verify_fail_continue; - } - - return ret; -} + return true; + }; +}; -// See StringTable::verify() above for the quick verification that is -// part of Universe::verify(). This verification is exhaustive and -// reports on every issue that is found. StringTable::verify() only -// reports on the first issue that is found. 
-// -// StringTable::verify_entry() checks: -// - oop value != NULL (same as verify()) -// - oop value is a String -// - hash(String) == hash in entry (same as verify()) -// - index for hash == index of entry (same as verify()) -// -// StringTable::compare_entries() checks: -// - oops are unique across all entries -// - String values are unique across all entries -// -int StringTable::verify_and_compare_entries() { - assert(StringTable_lock->is_locked(), "sanity check"); - - int fail_cnt = 0; - - // first, verify all the entries individually: - for (int bkt = 0; bkt < the_table()->table_size(); bkt++) { - HashtableEntry* e_ptr = the_table()->bucket(bkt); - for (int e_cnt = 0; e_ptr != NULL; e_ptr = e_ptr->next(), e_cnt++) { - VerifyRetTypes ret = verify_entry(bkt, e_cnt, e_ptr, _verify_with_mesgs); - if (ret != _verify_pass) { - fail_cnt++; - } - } +// This verification is part of Universe::verify() and needs to be quick. +void StringTable::verify() { + Thread* thr = Thread::current(); + VerifyStrings vs; + if (!the_table()->_local_table->try_scan(thr, vs)) { + log_info(stringtable)("verify unavailable at this moment"); } - - // Optimization: if the above check did not find any failures, then - // the comparison loop below does not need to call verify_entry() - // before calling compare_entries(). If there were failures, then we - // have to call verify_entry() to see if the entry can be passed to - // compare_entries() safely. When we call verify_entry() in the loop - // below, we do so quietly to void duplicate messages and we don't - // increment fail_cnt because the failures have already been counted. - bool need_entry_verify = (fail_cnt != 0); - - // second, verify all entries relative to each other: - for (int bkt1 = 0; bkt1 < the_table()->table_size(); bkt1++) { - HashtableEntry* e_ptr1 = the_table()->bucket(bkt1); - for (int e_cnt1 = 0; e_ptr1 != NULL; e_ptr1 = e_ptr1->next(), e_cnt1++) { - if (need_entry_verify) { - VerifyRetTypes ret = verify_entry(bkt1, e_cnt1, e_ptr1, - _verify_quietly); - if (ret == _verify_fail_done) { - // cannot use the current entry to compare against other entries - continue; - } - } - - for (int bkt2 = bkt1; bkt2 < the_table()->table_size(); bkt2++) { - HashtableEntry* e_ptr2 = the_table()->bucket(bkt2); - int e_cnt2; - for (e_cnt2 = 0; e_ptr2 != NULL; e_ptr2 = e_ptr2->next(), e_cnt2++) { - if (bkt1 == bkt2 && e_cnt2 <= e_cnt1) { - // skip the entries up to and including the one that - // we're comparing against - continue; - } - - if (need_entry_verify) { - VerifyRetTypes ret = verify_entry(bkt2, e_cnt2, e_ptr2, - _verify_quietly); - if (ret == _verify_fail_done) { - // cannot compare against this entry - continue; - } - } - - // compare two entries, report and count any failures: - if (compare_entries(bkt1, e_cnt1, e_ptr1, bkt2, e_cnt2, e_ptr2) - != _verify_pass) { - fail_cnt++; - } - } - } - } - } - return fail_cnt; -} - -// Create a new table and using alternate hash code, populate the new table -// with the existing strings. Set flag to use the alternate hash code afterwards. -void StringTable::rehash_table() { - assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); - // This should never happen with -Xshare:dump but it might in testing mode. - if (DumpSharedSpaces) return; - StringTable* new_table = new StringTable(); - - // Rehash the table - the_table()->move_to(new_table); - - // Delete the table and buckets (entries are reused in new table). 
- delete _the_table; - // Don't check if we need rehashing until the table gets unbalanced again. - // Then rehash with a new global seed. - _needs_rehashing = false; - _the_table = new_table; } // Utility for dumping strings @@ -678,7 +753,8 @@ oop new_s = NULL; typeArrayOop v = java_lang_String::value_no_keepalive(s); - typeArrayOop new_v = (typeArrayOop)MetaspaceShared::archive_heap_object(v, THREAD); + typeArrayOop new_v = + (typeArrayOop)MetaspaceShared::archive_heap_object(v, THREAD); if (new_v == NULL) { return NULL; } @@ -692,51 +768,51 @@ return new_s; } -bool StringTable::copy_shared_string(GrowableArray *string_space, - CompactStringTableWriter* writer) { +struct CopyArchive { + CompactStringTableWriter* _writer; + CopyArchive(CompactStringTableWriter* writer) : _writer(writer) {} + bool operator()(WeakHandle* val) { + oop s = val->peek(); + if (s == NULL) { + return true; + } + unsigned int hash = java_lang_String::hash_code(s); + if (hash == 0) { + return true; + } + + java_lang_String::set_hash(s, hash); + oop new_s = StringTable::create_archived_string(s, Thread::current()); + if (new_s == NULL) { + return true; + } + + val->replace(new_s); + // add to the compact table + _writer->add(hash, new_s); + return true; + } +}; + +void StringTable::copy_shared_string(CompactStringTableWriter* writer) { assert(MetaspaceShared::is_heap_object_archiving_allowed(), "must be"); - Thread* THREAD = Thread::current(); - for (int i = 0; i < the_table()->table_size(); ++i) { - HashtableEntry* bucket = the_table()->bucket(i); - for ( ; bucket != NULL; bucket = bucket->next()) { - oop s = string_object_no_keepalive(bucket); - unsigned int hash = java_lang_String::hash_code(s); - if (hash == 0) { - continue; - } - - java_lang_String::set_hash(s, hash); - oop new_s = create_archived_string(s, THREAD); - if (new_s == NULL) { - continue; - } - - // set the archived string in bucket - set_string_object(bucket, new_s); - - // add to the compact table - writer->add(hash, new_s); - } - } - - return true; + CopyArchive copy(writer); + StringTable::the_table()->_local_table->do_scan(Thread::current(), copy); } -void StringTable::write_to_archive(GrowableArray *string_space) { +void StringTable::write_to_archive() { assert(MetaspaceShared::is_heap_object_archiving_allowed(), "must be"); _shared_table.reset(); - int num_buckets = the_table()->number_of_entries() / - SharedSymbolTableBucketSize; + int num_buckets = the_table()->_items / SharedSymbolTableBucketSize; // calculation of num_buckets can result in zero buckets, we need at least one CompactStringTableWriter writer(num_buckets > 1 ? num_buckets : 1, &MetaspaceShared::stats()->string); // Copy the interned strings into the "string space" within the java heap - if (copy_shared_string(string_space, &writer)) { - writer.dump(&_shared_table); - } + copy_shared_string(&writer); + writer.dump(&_shared_table); } void StringTable::serialize(SerializeClosure* soc) { @@ -744,7 +820,8 @@ _shared_table.serialize(soc); if (soc->writing()) { - _shared_table.reset(); // Sanity. Make sure we don't use the shared table at dump time + // Sanity. 
Make sure we don't use the shared table at dump time + _shared_table.reset(); } else if (!_shared_string_mapped) { _shared_table.reset(); } @@ -753,4 +830,5 @@ void StringTable::shared_oops_do(OopClosure* f) { _shared_table.oops_do(f); } + #endif //INCLUDE_CDS_JAVA_HEAP diff --git a/src/hotspot/share/classfile/stringTable.hpp b/src/hotspot/share/classfile/stringTable.hpp --- a/src/hotspot/share/classfile/stringTable.hpp +++ b/src/hotspot/share/classfile/stringTable.hpp @@ -25,109 +25,115 @@ #ifndef SHARE_VM_CLASSFILE_STRINGTABLE_HPP #define SHARE_VM_CLASSFILE_STRINGTABLE_HPP -#include "utilities/hashtable.hpp" +#include "gc/shared/oopStorage.hpp" +#include "gc/shared/oopStorageParState.hpp" +#include "memory/allocation.hpp" +#include "memory/padded.hpp" +#include "oops/oop.hpp" +#include "oops/weakHandle.hpp" +#include "utilities/concurrentHashTable.hpp" template class CompactHashtable; class CompactStringTableWriter; -class FileMapInfo; class SerializeClosure; -class StringTable : public RehashableHashtable { +class StringTable; +class StringTableConfig; +typedef ConcurrentHashTable, + StringTableConfig, mtSymbol> StringTableHash; + +class StringTableCreateEntry; + +class StringTable : public CHeapObj{ friend class VMStructs; friend class Symbol; + friend class StringTableConfig; + friend class StringTableCreateEntry; private: + void grow(JavaThread* jt); + void clean_dead_entries(JavaThread* jt); + // The string table static StringTable* _the_table; - // Shared string table static CompactHashtable _shared_table; static bool _shared_string_mapped; + static bool _alt_hash; +private: - // Set if one bucket is out of balance due to hash algorithm deficiency - static bool _needs_rehashing; - - // Claimed high water mark for parallel chunked scanning - static volatile int _parallel_claimed_idx; + // Set if one bucket is out of balance due to hash algorithm deficiency + StringTableHash* _local_table; + size_t _current_size; + volatile bool _has_work; + volatile bool _needs_rehashing; - static oop intern(Handle string_or_null, jchar* chars, int length, TRAPS); - oop basic_add(int index, Handle string_or_null, jchar* name, int len, - unsigned int hashValue, TRAPS); + OopStorage* _weak_handles; - oop lookup_in_main_table(int index, jchar* chars, int length, unsigned int hashValue); - static oop lookup_shared(jchar* name, int len, unsigned int hash); + volatile size_t _items; + DEFINE_PAD_MINUS_SIZE(1, 64, sizeof(volatile size_t)); + volatile size_t _uncleaned_items; + DEFINE_PAD_MINUS_SIZE(2, 64, sizeof(volatile size_t)); - // Apply the give oop closure to the entries to the buckets - // in the range [start_idx, end_idx). - static void buckets_oops_do(OopClosure* f, int start_idx, int end_idx); + double get_load_factor(); + double get_dead_factor(); - typedef StringTable::BucketUnlinkContext BucketUnlinkContext; - // Unlink or apply the give oop closure to the entries to the buckets - // in the range [start_idx, end_idx). Unlinked bucket entries are collected in the given - // context to be freed later. - // This allows multiple threads to work on the table at once. - static void buckets_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int start_idx, int end_idx, BucketUnlinkContext* context); + void check_concurrent_work(); + void trigger_concurrent_work(); - // Hashing algorithm, used as the hash value used by the - // StringTable for bucket selection and comparison (stored in the - // HashtableEntry structures). This is used in the String.intern() method. 
- static unsigned int hash_string(const jchar* s, int len); - static unsigned int hash_string(oop string); - static unsigned int alt_hash_string(const jchar* s, int len); + static uintx item_added(); + static void item_removed(); + static size_t items_to_clean(size_t ncl); + + StringTable(); - // Accessors for the string roots in the hashtable entries. - // Use string_object_no_keepalive() only when the value is not returned - // outside of a scope where a thread transition is possible. - static oop string_object(HashtableEntry* entry); - static oop string_object_no_keepalive(HashtableEntry* entry); - static void set_string_object(HashtableEntry* entry, oop string); + static oop intern(Handle string_or_null_h, jchar* name, int len, TRAPS); + oop do_intern(Handle string_or_null, jchar* name, int len, uintx hash, TRAPS); + oop do_lookup(jchar* name, int len, uintx hash); - StringTable() : RehashableHashtable((int)StringTableSize, - sizeof (HashtableEntry)) {} + void concurrent_work(JavaThread* jt); + void print_table_statistics(outputStream* st, const char* table_name); - StringTable(HashtableBucket* t, int number_of_entries) - : RehashableHashtable((int)StringTableSize, sizeof (HashtableEntry), t, - number_of_entries) {} -public: + void try_rehash_table(); + bool do_rehash(); + + public: // The string table static StringTable* the_table() { return _the_table; } + size_t table_size(Thread* thread = NULL); - // Size of one bucket in the string table. Used when checking for rollover. - static uint bucket_size() { return sizeof(HashtableBucket); } + static OopStorage* weak_storage() { return the_table()->_weak_handles; } static void create_table() { assert(_the_table == NULL, "One string table allowed."); _the_table = new StringTable(); } + static void do_concurrent_work(JavaThread* jt); + static bool has_work() { return the_table()->_has_work; } + // GC support // Delete pointers to otherwise-unreachable objects. - static void unlink_or_oops_do(BoolObjectClosure* cl, OopClosure* f) { - int processed = 0; - int removed = 0; - unlink_or_oops_do(cl, f, &processed, &removed); + static void unlink(BoolObjectClosure* cl) { + unlink_or_oops_do(cl); } - static void unlink(BoolObjectClosure* cl) { - int processed = 0; - int removed = 0; - unlink_or_oops_do(cl, NULL, &processed, &removed); - } - static void unlink_or_oops_do(BoolObjectClosure* cl, OopClosure* f, int* processed, int* removed); - static void unlink(BoolObjectClosure* cl, int* processed, int* removed) { - unlink_or_oops_do(cl, NULL, processed, removed); - } + static void unlink_or_oops_do(BoolObjectClosure* is_alive, + OopClosure* f = NULL, int* processed = NULL, + int* removed = NULL); + // Serially invoke "f->do_oop" on the locations of all oops in the table. static void oops_do(OopClosure* f); // Possibly parallel versions of the above - static void possibly_parallel_unlink_or_oops_do(BoolObjectClosure* cl, OopClosure* f, int* processed, int* removed); - static void possibly_parallel_unlink(BoolObjectClosure* cl, int* processed, int* removed) { - possibly_parallel_unlink_or_oops_do(cl, NULL, processed, removed); - } - static void possibly_parallel_oops_do(OopClosure* f); + static void possibly_parallel_unlink( + OopStorage::ParState* + _par_state_string, BoolObjectClosure* cl, int* processed, int* removed); + static void possibly_parallel_oops_do( + OopStorage::ParState* + _par_state_string,OopClosure* f); - // Internal test. 
- static void test_alt_hash() PRODUCT_RETURN; + static void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f); + static void weak_oops_do(OopClosure* f); // Probing static oop lookup(Symbol* symbol); @@ -138,46 +144,26 @@ static oop intern(oop string, TRAPS); static oop intern(const char *utf8_string, TRAPS); - // Debugging - static void verify(); - static void dump(outputStream* st, bool verbose=false); - - enum VerifyMesgModes { - _verify_quietly = 0, - _verify_with_mesgs = 1 - }; - - enum VerifyRetTypes { - _verify_pass = 0, - _verify_fail_continue = 1, - _verify_fail_done = 2 - }; - - static VerifyRetTypes compare_entries(int bkt1, int e_cnt1, - HashtableEntry* e_ptr1, - int bkt2, int e_cnt2, - HashtableEntry* e_ptr2); - static VerifyRetTypes verify_entry(int bkt, int e_cnt, - HashtableEntry* e_ptr, - VerifyMesgModes mesg_mode); - static int verify_and_compare_entries(); + // Rehash the string table if it gets out of balance + static void rehash_table(); + static bool needs_rehashing() + { return StringTable::the_table()->_needs_rehashing; } // Sharing + oop lookup_shared(jchar* name, int len, unsigned int hash); + static oop create_archived_string(oop s, Thread* THREAD); static void set_shared_string_mapped() { _shared_string_mapped = true; } static bool shared_string_mapped() { return _shared_string_mapped; } static void shared_oops_do(OopClosure* f) NOT_CDS_JAVA_HEAP_RETURN; - static bool copy_shared_string(GrowableArray *string_space, - CompactStringTableWriter* ch_table) NOT_CDS_JAVA_HEAP_RETURN_(false); - static oop create_archived_string(oop s, Thread* THREAD) NOT_CDS_JAVA_HEAP_RETURN_(NULL); - static void write_to_archive(GrowableArray *string_space) NOT_CDS_JAVA_HEAP_RETURN; + static void copy_shared_string(CompactStringTableWriter* ch_table) + NOT_CDS_JAVA_HEAP_RETURN; + static void write_to_archive() NOT_CDS_JAVA_HEAP_RETURN; static void serialize(SerializeClosure* soc) NOT_CDS_JAVA_HEAP_RETURN; - // Rehash the symbol table if it gets out of balance - static void rehash_table(); - static bool needs_rehashing() { return _needs_rehashing; } + // Jcmd + static void dump(outputStream* st, bool verbose=false); + // Debugging + static void verify(); +}; - // Parallel chunked scanning - static void clear_parallel_claimed_index() { _parallel_claimed_idx = 0; } - static int parallel_claimed_index() { return _parallel_claimed_idx; } -}; #endif // SHARE_VM_CLASSFILE_STRINGTABLE_HPP diff --git a/src/hotspot/share/gc/cms/cmsHeap.cpp b/src/hotspot/share/gc/cms/cmsHeap.cpp --- a/src/hotspot/share/gc/cms/cmsHeap.cpp +++ b/src/hotspot/share/gc/cms/cmsHeap.cpp @@ -220,13 +220,14 @@ ScanningOption so, bool only_strong_roots, OopsInGenClosure* root_closure, - CLDClosure* cld_closure) { + CLDClosure* cld_closure, + OopStorage::ParState* par_state_string) { MarkingCodeBlobClosure mark_code_closure(root_closure, !CodeBlobToOopClosure::FixRelocations); CLDClosure* weak_cld_closure = only_strong_roots ? 
NULL : cld_closure; process_roots(scope, so, root_closure, cld_closure, weak_cld_closure, &mark_code_closure); if (!only_strong_roots) { - process_string_table_roots(scope, root_closure); + process_string_table_roots(scope, root_closure, par_state_string); } if (young_gen_as_roots && diff --git a/src/hotspot/share/gc/cms/cmsHeap.hpp b/src/hotspot/share/gc/cms/cmsHeap.hpp --- a/src/hotspot/share/gc/cms/cmsHeap.hpp +++ b/src/hotspot/share/gc/cms/cmsHeap.hpp @@ -30,6 +30,7 @@ #include "gc/shared/collectedHeap.hpp" #include "gc/shared/gcCause.hpp" #include "gc/shared/genCollectedHeap.hpp" +#include "gc/shared/oopStorageParState.hpp" #include "utilities/growableArray.hpp" class CLDClosure; @@ -90,7 +91,8 @@ ScanningOption so, bool only_strong_roots, OopsInGenClosure* root_closure, - CLDClosure* cld_closure); + CLDClosure* cld_closure, + OopStorage::ParState* par_state_string = NULL); GCMemoryManager* old_manager() const { return _old_manager; } diff --git a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp --- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp +++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp @@ -54,6 +54,7 @@ #include "gc/shared/genCollectedHeap.hpp" #include "gc/shared/genOopClosures.inline.hpp" #include "gc/shared/isGCActiveMark.hpp" +#include "gc/shared/oopStorageParState.hpp" #include "gc/shared/referencePolicy.hpp" #include "gc/shared/space.inline.hpp" #include "gc/shared/strongRootsScope.hpp" @@ -2769,10 +2770,12 @@ protected: CMSCollector* _collector; uint _n_workers; + OopStorage::ParState _par_state_string; CMSParMarkTask(const char* name, CMSCollector* collector, uint n_workers) : AbstractGangTask(name), _collector(collector), - _n_workers(n_workers) {} + _n_workers(n_workers), + _par_state_string(StringTable::weak_storage()) {} // Work method in support of parallel rescan ... 
of young gen spaces void do_young_space_rescan(OopsInGenClosure* cl, ContiguousSpace* space, @@ -4274,7 +4277,9 @@ GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()), _collector->should_unload_classes(), &par_mri_cl, - &cld_closure); + &cld_closure, + &_par_state_string); + assert(_collector->should_unload_classes() || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache), "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops"); @@ -4403,7 +4408,8 @@ GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()), _collector->should_unload_classes(), &par_mrias_cl, - NULL); // The dirty klasses will be handled below + NULL, // The dirty klasses will be handled below + &_par_state_string); assert(_collector->should_unload_classes() || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache), diff --git a/src/hotspot/share/gc/cms/parNewGeneration.cpp b/src/hotspot/share/gc/cms/parNewGeneration.cpp --- a/src/hotspot/share/gc/cms/parNewGeneration.cpp +++ b/src/hotspot/share/gc/cms/parNewGeneration.cpp @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "classfile/stringTable.hpp" #include "gc/cms/cmsHeap.inline.hpp" #include "gc/cms/compactibleFreeListSpace.hpp" #include "gc/cms/concurrentMarkSweepGeneration.hpp" @@ -589,7 +590,8 @@ _young_gen(young_gen), _old_gen(old_gen), _young_old_boundary(young_old_boundary), _state_set(state_set), - _strong_roots_scope(strong_roots_scope) + _strong_roots_scope(strong_roots_scope), + _par_state_string(StringTable::weak_storage()) {} void ParNewGenTask::work(uint worker_id) { @@ -611,7 +613,8 @@ heap->young_process_roots(_strong_roots_scope, &par_scan_state.to_space_root_closure(), &par_scan_state.older_gen_closure(), - &cld_scan_closure); + &cld_scan_closure, + &_par_state_string); par_scan_state.end_strong_roots(); diff --git a/src/hotspot/share/gc/cms/parNewGeneration.hpp b/src/hotspot/share/gc/cms/parNewGeneration.hpp --- a/src/hotspot/share/gc/cms/parNewGeneration.hpp +++ b/src/hotspot/share/gc/cms/parNewGeneration.hpp @@ -29,6 +29,7 @@ #include "gc/serial/defNewGeneration.hpp" #include "gc/shared/copyFailedInfo.hpp" #include "gc/shared/gcTrace.hpp" +#include "gc/shared/oopStorageParState.hpp" #include "gc/shared/plab.hpp" #include "gc/shared/preservedMarks.hpp" #include "gc/shared/taskqueue.hpp" @@ -236,6 +237,7 @@ HeapWord* _young_old_boundary; class ParScanThreadStateSet* _state_set; StrongRootsScope* _strong_roots_scope; + OopStorage::ParState _par_state_string; public: ParNewGenTask(ParNewGeneration* young_gen, diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp --- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp @@ -69,6 +69,7 @@ #include "gc/shared/gcTraceTime.inline.hpp" #include "gc/shared/generationSpec.hpp" #include "gc/shared/isGCActiveMark.hpp" +#include "gc/shared/oopStorageParState.hpp" #include "gc/shared/preservedMarks.inline.hpp" #include "gc/shared/suspendibleThreadSet.hpp" #include "gc/shared/referenceProcessor.inline.hpp" @@ -3218,6 +3219,7 @@ private: BoolObjectClosure* _is_alive; G1StringDedupUnlinkOrOopsDoClosure _dedup_closure; + OopStorage::ParState _par_state_string; int _initial_string_table_size; int _initial_symbol_table_size; @@ -3237,24 +3239,19 @@ AbstractGangTask("String/Symbol Unlinking"), _is_alive(is_alive), _dedup_closure(is_alive, NULL, false), + 
_par_state_string(StringTable::weak_storage()), _process_strings(process_strings), _strings_processed(0), _strings_removed(0), _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0), _process_string_dedup(process_string_dedup) { - _initial_string_table_size = StringTable::the_table()->table_size(); + _initial_string_table_size = (int) StringTable::the_table()->table_size(); _initial_symbol_table_size = SymbolTable::the_table()->table_size(); - if (process_strings) { - StringTable::clear_parallel_claimed_index(); - } if (process_symbols) { SymbolTable::clear_parallel_claimed_index(); } } ~G1StringAndSymbolCleaningTask() { - guarantee(!_process_strings || StringTable::parallel_claimed_index() >= _initial_string_table_size, - "claim value %d after unlink less than initial string table size %d", - StringTable::parallel_claimed_index(), _initial_string_table_size); guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size, "claim value %d after unlink less than initial symbol table size %d", SymbolTable::parallel_claimed_index(), _initial_symbol_table_size); @@ -3273,7 +3270,7 @@ int symbols_processed = 0; int symbols_removed = 0; if (_process_strings) { - StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed); + StringTable::possibly_parallel_unlink(&_par_state_string, _is_alive, &strings_processed, &strings_removed); Atomic::add(strings_processed, &_strings_processed); Atomic::add(strings_removed, &_strings_removed); } diff --git a/src/hotspot/share/gc/g1/g1RootProcessor.cpp b/src/hotspot/share/gc/g1/g1RootProcessor.cpp --- a/src/hotspot/share/gc/g1/g1RootProcessor.cpp +++ b/src/hotspot/share/gc/g1/g1RootProcessor.cpp @@ -38,6 +38,7 @@ #include "gc/g1/g1RootClosures.hpp" #include "gc/g1/g1RootProcessor.hpp" #include "gc/g1/heapRegion.inline.hpp" +#include "gc/shared/oopStorageParState.hpp" #include "gc/shared/referenceProcessor.hpp" #include "gc/shared/weakProcessor.hpp" #include "memory/allocation.inline.hpp" @@ -72,6 +73,7 @@ _process_strong_tasks(G1RP_PS_NumElements), _srs(n_workers), _lock(Mutex::leaf, "G1 Root Scanning barrier lock", false, Monitor::_safepoint_check_never), + _par_state_string(StringTable::weak_storage()), _n_workers_discovered_strong_classes(0) {} void G1RootProcessor::evacuate_roots(G1ParScanThreadState* pss, uint worker_i) { @@ -301,7 +303,7 @@ G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::StringTableRoots, worker_i); // All threads execute the following. A specific chunk of buckets // from the StringTable are the individual tasks. - StringTable::possibly_parallel_oops_do(closures->weak_oops()); + StringTable::possibly_parallel_oops_do(&_par_state_string, closures->weak_oops()); } void G1RootProcessor::process_code_cache_roots(CodeBlobClosure* code_closure, diff --git a/src/hotspot/share/gc/g1/g1RootProcessor.hpp b/src/hotspot/share/gc/g1/g1RootProcessor.hpp --- a/src/hotspot/share/gc/g1/g1RootProcessor.hpp +++ b/src/hotspot/share/gc/g1/g1RootProcessor.hpp @@ -25,6 +25,7 @@ #ifndef SHARE_VM_GC_G1_G1ROOTPROCESSOR_HPP #define SHARE_VM_GC_G1_G1ROOTPROCESSOR_HPP +#include "gc/shared/oopStorageParState.hpp" #include "gc/shared/strongRootsScope.hpp" #include "memory/allocation.hpp" #include "runtime/mutex.hpp" @@ -49,6 +50,7 @@ G1CollectedHeap* _g1h; SubTasksDone _process_strong_tasks; StrongRootsScope _srs; + OopStorage::ParState _par_state_string; // Used to implement the Thread work barrier. 
Monitor _lock; diff --git a/src/hotspot/share/gc/shared/genCollectedHeap.cpp b/src/hotspot/share/gc/shared/genCollectedHeap.cpp --- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp +++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp @@ -44,6 +44,7 @@ #include "gc/shared/genCollectedHeap.hpp" #include "gc/shared/genOopClosures.inline.hpp" #include "gc/shared/generationSpec.hpp" +#include "gc/shared/oopStorageParState.inline.hpp" #include "gc/shared/space.hpp" #include "gc/shared/strongRootsScope.hpp" #include "gc/shared/vmGCOperations.hpp" @@ -851,12 +852,17 @@ } void GenCollectedHeap::process_string_table_roots(StrongRootsScope* scope, - OopClosure* root_closure) { + OopClosure* root_closure, + OopStorage::ParState* par_state_string) { assert(root_closure != NULL, "Must be set"); // All threads execute the following. A specific chunk of buckets // from the StringTable are the individual tasks. - if (scope->n_threads() > 1) { - StringTable::possibly_parallel_oops_do(root_closure); + + // Either we should be single threaded or have a ParState + assert((scope->n_threads() <= 1) || par_state_string != NULL, "Parallel but not ParState"); + + if (scope->n_threads() > 1 && par_state_string != NULL) { + StringTable::possibly_parallel_oops_do(par_state_string, root_closure); } else { StringTable::oops_do(root_closure); } @@ -865,12 +871,13 @@ void GenCollectedHeap::young_process_roots(StrongRootsScope* scope, OopsInGenClosure* root_closure, OopsInGenClosure* old_gen_closure, - CLDClosure* cld_closure) { + CLDClosure* cld_closure, + OopStorage::ParState* par_state_string) { MarkingCodeBlobClosure mark_code_closure(root_closure, CodeBlobToOopClosure::FixRelocations); process_roots(scope, SO_ScavengeCodeCache, root_closure, cld_closure, cld_closure, &mark_code_closure); - process_string_table_roots(scope, root_closure); + process_string_table_roots(scope, root_closure, par_state_string); if (!_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) { root_closure->reset_generation(); @@ -890,7 +897,8 @@ ScanningOption so, bool only_strong_roots, OopsInGenClosure* root_closure, - CLDClosure* cld_closure) { + CLDClosure* cld_closure, + OopStorage::ParState* par_state_string) { MarkingCodeBlobClosure mark_code_closure(root_closure, is_adjust_phase); CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure; @@ -899,7 +907,7 @@ // We never treat the string table as roots during marking // for the full gc, so we only need to process it during // the adjust phase. 
- process_string_table_roots(scope, root_closure); + process_string_table_roots(scope, root_closure, par_state_string); } _process_strong_tasks->all_tasks_completed(scope->n_threads()); diff --git a/src/hotspot/share/gc/shared/genCollectedHeap.hpp b/src/hotspot/share/gc/shared/genCollectedHeap.hpp --- a/src/hotspot/share/gc/shared/genCollectedHeap.hpp +++ b/src/hotspot/share/gc/shared/genCollectedHeap.hpp @@ -28,6 +28,7 @@ #include "gc/shared/collectedHeap.hpp" #include "gc/shared/collectorPolicy.hpp" #include "gc/shared/generation.hpp" +#include "gc/shared/oopStorageParState.hpp" #include "gc/shared/softRefGenPolicy.hpp" class AdaptiveSizePolicy; @@ -401,7 +402,8 @@ CodeBlobToOopClosure* code_roots); void process_string_table_roots(StrongRootsScope* scope, - OopClosure* root_closure); + OopClosure* root_closure, + OopStorage::ParState* par_state_string); // Accessor for memory state verification support NOT_PRODUCT( @@ -415,14 +417,16 @@ void young_process_roots(StrongRootsScope* scope, OopsInGenClosure* root_closure, OopsInGenClosure* old_gen_closure, - CLDClosure* cld_closure); + CLDClosure* cld_closure, + OopStorage::ParState* par_state_string = NULL); void full_process_roots(StrongRootsScope* scope, bool is_adjust_phase, ScanningOption so, bool only_strong_roots, OopsInGenClosure* root_closure, - CLDClosure* cld_closure); + CLDClosure* cld_closure, + OopStorage::ParState* par_state_string = NULL); // Apply "root_closure" to all the weak roots of the system. // These include JNI weak roots, string table, diff --git a/src/hotspot/share/gc/shared/strongRootsScope.cpp b/src/hotspot/share/gc/shared/strongRootsScope.cpp --- a/src/hotspot/share/gc/shared/strongRootsScope.cpp +++ b/src/hotspot/share/gc/shared/strongRootsScope.cpp @@ -38,8 +38,6 @@ StrongRootsScope::StrongRootsScope(uint n_threads) : _n_threads(n_threads) { Threads::change_thread_claim_parity(); - // Zero the claimed high water mark in the StringTable - StringTable::clear_parallel_claimed_index(); } StrongRootsScope::~StrongRootsScope() { diff --git a/src/hotspot/share/memory/metaspaceShared.cpp b/src/hotspot/share/memory/metaspaceShared.cpp --- a/src/hotspot/share/memory/metaspaceShared.cpp +++ b/src/hotspot/share/memory/metaspaceShared.cpp @@ -1841,7 +1841,7 @@ G1CollectedHeap::heap()->begin_archive_alloc_range(); // Archive interned string objects - StringTable::write_to_archive(closed_archive); + StringTable::write_to_archive(); G1CollectedHeap::heap()->end_archive_alloc_range(closed_archive, os::vm_allocation_granularity()); diff --git a/src/hotspot/share/oops/weakHandle.cpp b/src/hotspot/share/oops/weakHandle.cpp --- a/src/hotspot/share/oops/weakHandle.cpp +++ b/src/hotspot/share/oops/weakHandle.cpp @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "classfile/systemDictionary.hpp" +#include "classfile/stringTable.hpp" #include "gc/shared/oopStorage.hpp" #include "oops/access.inline.hpp" #include "oops/oop.hpp" @@ -35,6 +36,10 @@ return SystemDictionary::vm_weak_oop_storage(); } +template <> OopStorage* WeakHandle::get_storage() { + return StringTable::weak_storage(); +} + template WeakHandle WeakHandle::create(Handle obj) { assert(obj() != NULL, "no need to create weak null oop"); @@ -68,4 +73,5 @@ // Provide instantiation. 
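
Editor's note: the weakHandle.cpp hunk above adds a get_storage() specialization so handles tagged vm_string_table_data allocate their slot from the StringTable's own OopStorage instead of the general VM-weak storage. A compilable sketch of that selection pattern follows; it is plain C++, not HotSpot code, and Storage, class_loader_storage and string_table_storage are illustrative stand-ins for the real OopStorage instances.

#include <iostream>

struct Storage { const char* name; };

static Storage class_loader_storage{"vm weak"};
static Storage string_table_storage{"StringTable weak"};

enum WeakKind { vm_class_loader_data, vm_string_table_data };

template <WeakKind kind>
struct WeakRef {
  static Storage* get_storage();   // each kind picks its backing storage
  void* _slot = nullptr;           // would be an oop* in the VM
};

template <> Storage* WeakRef<vm_class_loader_data>::get_storage() {
  return &class_loader_storage;
}
template <> Storage* WeakRef<vm_string_table_data>::get_storage() {
  return &string_table_storage;
}

int main() {
  std::cout << WeakRef<vm_string_table_data>::get_storage()->name << "\n";
  return 0;
}

The new replace(oop) member shown in weakHandle.inline.hpp is the companion piece: it stores a new referent into the slot the handle already owns rather than releasing the handle and creating a new one.
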
template class WeakHandle; +template class WeakHandle; diff --git a/src/hotspot/share/oops/weakHandle.hpp b/src/hotspot/share/oops/weakHandle.hpp --- a/src/hotspot/share/oops/weakHandle.hpp +++ b/src/hotspot/share/oops/weakHandle.hpp @@ -39,12 +39,11 @@ // This is the vm version of jweak but has different GC lifetimes and policies, // depending on the type. -enum WeakHandleType { vm_class_loader_data, vm_string }; +enum WeakHandleType { vm_class_loader_data, vm_string, vm_string_table_data }; template class WeakHandle { public: - private: oop* _obj; @@ -59,6 +58,8 @@ void release() const; bool is_null() const { return _obj == NULL; } + void replace(oop with_obj); + void print() const; void print_on(outputStream* st) const; }; diff --git a/src/hotspot/share/oops/weakHandle.inline.hpp b/src/hotspot/share/oops/weakHandle.inline.hpp --- a/src/hotspot/share/oops/weakHandle.inline.hpp +++ b/src/hotspot/share/oops/weakHandle.inline.hpp @@ -40,4 +40,10 @@ return RootAccess::oop_load(_obj); } +template +void WeakHandle::replace(oop with_obj) { + RootAccess::oop_store(_obj, with_obj); +} + #endif // SHARE_VM_OOPS_WEAKHANDLE_INLINE_HPP + diff --git a/src/hotspot/share/runtime/java.cpp b/src/hotspot/share/runtime/java.cpp --- a/src/hotspot/share/runtime/java.cpp +++ b/src/hotspot/share/runtime/java.cpp @@ -523,19 +523,6 @@ BeforeExit_lock->notify_all(); } - if (VerifyStringTableAtExit) { - int fail_cnt = 0; - { - MutexLocker ml(StringTable_lock); - fail_cnt = StringTable::verify_and_compare_entries(); - } - - if (fail_cnt != 0) { - tty->print_cr("ERROR: fail_cnt=%d", fail_cnt); - guarantee(fail_cnt == 0, "unexpected StringTable verification failures"); - } - } - #undef BEFORE_EXIT_NOT_RUN #undef BEFORE_EXIT_RUNNING #undef BEFORE_EXIT_DONE diff --git a/src/hotspot/share/runtime/mutexLocker.cpp b/src/hotspot/share/runtime/mutexLocker.cpp --- a/src/hotspot/share/runtime/mutexLocker.cpp +++ b/src/hotspot/share/runtime/mutexLocker.cpp @@ -48,6 +48,8 @@ Mutex* JNIGlobalActive_lock = NULL; Mutex* JNIWeakAlloc_lock = NULL; Mutex* JNIWeakActive_lock = NULL; +Mutex* StringTableWeakAlloc_lock = NULL; +Mutex* StringTableWeakActive_lock = NULL; Mutex* JNIHandleBlockFreeList_lock = NULL; Mutex* VMWeakAlloc_lock = NULL; Mutex* VMWeakActive_lock = NULL; @@ -186,6 +188,9 @@ def(VMWeakAlloc_lock , PaddedMutex , vmweak, true, Monitor::_safepoint_check_never); def(VMWeakActive_lock , PaddedMutex , vmweak-1, true, Monitor::_safepoint_check_never); + def(StringTableWeakAlloc_lock , PaddedMutex , vmweak, true, Monitor::_safepoint_check_never); + def(StringTableWeakActive_lock , PaddedMutex , vmweak-1, true, Monitor::_safepoint_check_never); + if (UseConcMarkSweepGC || UseG1GC) { def(FullGCCount_lock , PaddedMonitor, leaf, true, Monitor::_safepoint_check_never); // in support of ExplicitGCInvokesConcurrent } diff --git a/src/hotspot/share/runtime/mutexLocker.hpp b/src/hotspot/share/runtime/mutexLocker.hpp --- a/src/hotspot/share/runtime/mutexLocker.hpp +++ b/src/hotspot/share/runtime/mutexLocker.hpp @@ -42,6 +42,8 @@ extern Mutex* JNIGlobalActive_lock; // JNI global storage active list lock extern Mutex* JNIWeakAlloc_lock; // JNI weak storage allocate list lock extern Mutex* JNIWeakActive_lock; // JNI weak storage active list lock +extern Mutex* StringTableWeakAlloc_lock; // StringTable weak storage allocate list lock +extern Mutex* StringTableWeakActive_lock; // STringTable weak storage active list lock extern Mutex* JNIHandleBlockFreeList_lock; // a lock on the JNI handle block free list extern Mutex* VMWeakAlloc_lock; // 
VM Weak Handles storage allocate list lock extern Mutex* VMWeakActive_lock; // VM Weak Handles storage active list lock diff --git a/src/hotspot/share/runtime/serviceThread.cpp b/src/hotspot/share/runtime/serviceThread.cpp --- a/src/hotspot/share/runtime/serviceThread.cpp +++ b/src/hotspot/share/runtime/serviceThread.cpp @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "classfile/stringTable.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/javaCalls.hpp" #include "runtime/serviceThread.hpp" @@ -82,6 +83,7 @@ bool has_gc_notification_event = false; bool has_dcmd_notification_event = false; bool acs_notify = false; + bool stringtable_work = false; JvmtiDeferredEvent jvmti_event; { // Need state transition ThreadBlockInVM so that this thread @@ -98,7 +100,8 @@ while (!(sensors_changed = LowMemoryDetector::has_pending_requests()) && !(has_jvmti_events = JvmtiDeferredEventQueue::has_events()) && !(has_gc_notification_event = GCNotifier::has_event()) && - !(has_dcmd_notification_event = DCmdFactory::has_pending_jmx_notification())) { + !(has_dcmd_notification_event = DCmdFactory::has_pending_jmx_notification()) && + !(stringtable_work = StringTable::has_work())) { // wait until one of the sensors has pending requests, or there is a // pending JVMTI event or JMX GC notification to post Service_lock->wait(Mutex::_no_safepoint_check_flag); @@ -109,6 +112,10 @@ } } + if (stringtable_work) { + StringTable::do_concurrent_work(jt); + } + if (has_jvmti_events) { jvmti_event.post(); } diff --git a/src/hotspot/share/runtime/vmStructs.cpp b/src/hotspot/share/runtime/vmStructs.cpp --- a/src/hotspot/share/runtime/vmStructs.cpp +++ b/src/hotspot/share/runtime/vmStructs.cpp @@ -164,7 +164,6 @@ typedef Hashtable IntptrHashtable; typedef Hashtable SymbolHashtable; typedef HashtableEntry SymbolHashtableEntry; -typedef Hashtable StringHashtable; typedef Hashtable KlassHashtable; typedef HashtableEntry KlassHashtableEntry; typedef CompactHashtable SymbolCompactHashTable; @@ -476,12 +475,6 @@ static_field(SymbolTable, _shared_table, SymbolCompactHashTable) \ static_field(RehashableSymbolHashtable, _seed, juint) \ \ - /***************/ \ - /* StringTable */ \ - /***************/ \ - \ - static_field(StringTable, _the_table, StringTable*) \ - \ /********************/ \ /* CompactHashTable */ \ /********************/ \ @@ -1365,7 +1358,6 @@ declare_toplevel_type(BasicHashtable) \ declare_type(RehashableSymbolHashtable, BasicHashtable) \ declare_type(SymbolTable, SymbolHashtable) \ - declare_type(StringTable, StringHashtable) \ declare_type(Dictionary, KlassHashtable) \ declare_toplevel_type(BasicHashtableEntry) \ declare_type(IntptrHashtableEntry, BasicHashtableEntry) \ diff --git a/src/hotspot/share/utilities/concurrentHashTable.hpp b/src/hotspot/share/utilities/concurrentHashTable.hpp --- a/src/hotspot/share/utilities/concurrentHashTable.hpp +++ b/src/hotspot/share/utilities/concurrentHashTable.hpp @@ -484,6 +484,9 @@ void statistics_to(Thread* thread, VALUE_SIZE_FUNC& vs_f, outputStream* st, const char* table_name); + // Moves all nodes from this table to to_cht + bool try_move_nodes_to(Thread* thread, ConcurrentHashTable* to_cht); + // This is a Curiously Recurring Template Pattern (CRPT) interface for the // specialization. 
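
Editor's note: the serviceThread.cpp hunk above adds StringTable::has_work() to the set of conditions the service thread sleeps on under Service_lock, and runs StringTable::do_concurrent_work(jt) after dropping the lock. A minimal standalone sketch of that wait/dispatch shape, using std::condition_variable in place of Service_lock and a single illustrative flag (the real loop polls several producers in the same predicate):

#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

std::mutex              service_lock;
std::condition_variable service_cv;
bool stringtable_has_work = false;   // set when the table wants cleanup/resize
bool shutting_down        = false;

void service_loop() {
  for (;;) {
    bool do_work = false;
    {
      std::unique_lock<std::mutex> ml(service_lock);
      // Sleep until some producer posts work.
      service_cv.wait(ml, [] { return stringtable_has_work || shutting_down; });
      if (stringtable_has_work) {
        stringtable_has_work = false;
        do_work = true;
      } else if (shutting_down) {
        return;
      }
    }
    // The actual work runs without holding the service lock.
    if (do_work) {
      std::cout << "concurrent StringTable work\n";
    }
  }
}

int main() {
  std::thread service(service_loop);
  {
    std::lock_guard<std::mutex> ml(service_lock);
    stringtable_has_work = true;      // what trigger_concurrent_work() amounts to
  }
  service_cv.notify_all();
  {
    std::lock_guard<std::mutex> ml(service_lock);
    shutting_down = true;
  }
  service_cv.notify_all();
  service.join();
  return 0;
}
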
struct BaseConfig { diff --git a/src/hotspot/share/utilities/concurrentHashTable.inline.hpp b/src/hotspot/share/utilities/concurrentHashTable.inline.hpp --- a/src/hotspot/share/utilities/concurrentHashTable.inline.hpp +++ b/src/hotspot/share/utilities/concurrentHashTable.inline.hpp @@ -293,7 +293,7 @@ inline void ConcurrentHashTable:: write_synchonize_on_visible_epoch(Thread* thread) { - assert(_resize_lock->owned_by_self(), "Re-size lock not held"); + assert(_resize_lock_owner == thread, "Re-size lock not held"); OrderAccess::fence(); // Prevent below load from floating up. // If no reader saw this version we can skip write_synchronize. if (OrderAccess::load_acquire(&_invisible_epoch) == thread) { @@ -488,7 +488,7 @@ { // Here we have resize lock so table is SMR safe, and there is no new // table. Can do this in parallel if we want. - assert(_resize_lock->owned_by_self(), "Re-size lock not held"); + assert(_resize_lock_owner == thread, "Re-size lock not held"); Node* ndel[BULK_DELETE_LIMIT]; InternalTable* table = get_table(); assert(start_idx < stop_idx, "Must be"); @@ -500,9 +500,9 @@ // own read-side. GlobalCounter::critical_section_begin(thread); for (size_t bucket_it = start_idx; bucket_it < stop_idx; bucket_it++) { - Bucket* bucket = _table->get_bucket(bucket_it); + Bucket* bucket = table->get_bucket(bucket_it); Bucket* prefetch_bucket = (bucket_it+1) < stop_idx ? - _table->get_bucket(bucket_it+1) : NULL; + table->get_bucket(bucket_it+1) : NULL; if (!HaveDeletables::value, EVALUATE_FUNC>:: have_deletable(bucket, eval_f, prefetch_bucket)) { @@ -695,17 +695,13 @@ if (!try_resize_lock(thread)) { return false; } - - assert(_resize_lock->owned_by_self(), "Re-size lock not held"); - + assert(_resize_lock_owner == thread, "Re-size lock not held"); if (_table->_log2_size == _log2_start_size || _table->_log2_size <= log2_size) { unlock_resize_lock(thread); return false; } - _new_table = new InternalTable(_table->_log2_size - 1); - return true; } @@ -713,8 +709,7 @@ inline void ConcurrentHashTable:: internal_shrink_epilog(Thread* thread) { - assert(_resize_lock->owned_by_self(), "Re-size lock not held"); - assert(_resize_lock_owner, "Should be locked"); + assert(_resize_lock_owner == thread, "Re-size lock not held"); InternalTable* old_table = set_table_from_new(); _size_limit_reached = false; @@ -771,14 +766,13 @@ internal_shrink(Thread* thread, size_t log2_size) { if (!internal_shrink_prolog(thread, log2_size)) { - assert(!_resize_lock->owned_by_self(), "Re-size lock held"); + assert(_resize_lock_owner != thread, "Re-size lock held"); return false; } - assert(_resize_lock->owned_by_self(), "Re-size lock not held"); assert(_resize_lock_owner == thread, "Should be locked by me"); internal_shrink_range(thread, 0, _new_table->_size); internal_shrink_epilog(thread); - assert(!_resize_lock->owned_by_self(), "Re-size lock not held"); + assert(_resize_lock_owner != thread, "Re-size lock held"); return true; } @@ -815,8 +809,7 @@ inline void ConcurrentHashTable:: internal_grow_epilog(Thread* thread) { - assert(_resize_lock->owned_by_self(), "Re-size lock not held"); - assert(_resize_lock_owner, "Should be locked"); + assert(_resize_lock_owner == thread, "Should be locked"); InternalTable* old_table = set_table_from_new(); unlock_resize_lock(thread); @@ -835,14 +828,13 @@ internal_grow(Thread* thread, size_t log2_size) { if (!internal_grow_prolog(thread, log2_size)) { - assert(!_resize_lock->owned_by_self(), "Re-size lock held"); + assert(_resize_lock_owner != thread, "Re-size lock held"); return 
false; } - assert(_resize_lock->owned_by_self(), "Re-size lock not held"); assert(_resize_lock_owner == thread, "Should be locked by me"); internal_grow_range(thread, 0, _table->_size); internal_grow_epilog(thread); - assert(!_resize_lock->owned_by_self(), "Re-size lock not held"); + assert(_resize_lock_owner != thread, "Re-size lock held"); return true; } @@ -955,15 +947,13 @@ inline void ConcurrentHashTable:: do_scan_locked(Thread* thread, FUNC& scan_f) { - assert(_resize_lock->owned_by_self() || - (thread->is_VM_thread() && SafepointSynchronize::is_at_safepoint()), - "Re-size lock not held or not VMThread at safepoint"); + assert(_resize_lock_owner == thread, "Re-size lock not held"); // We can do a critical section over the entire loop but that would block // updates for a long time. Instead we choose to block resizes. InternalTable* table = get_table(); - for (size_t bucket_it = 0; bucket_it < _table->_size; bucket_it++) { + for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) { ScopedCS cs(thread, this); - if (!visit_nodes(_table->get_bucket(bucket_it), scan_f)) { + if (!visit_nodes(table->get_bucket(bucket_it), scan_f)) { break; /* ends critical section */ } } /* ends critical section */ @@ -1094,17 +1084,11 @@ inline bool ConcurrentHashTable:: try_scan(Thread* thread, SCAN_FUNC& scan_f) { - assert(!_resize_lock->owned_by_self(), "Re-size lock not held"); - bool vm_and_safepoint = thread->is_VM_thread() && - SafepointSynchronize::is_at_safepoint(); - if (!vm_and_safepoint && !try_resize_lock(thread)) { + if (!try_resize_lock(thread)) { return false; } do_scan_locked(thread, scan_f); - if (!vm_and_safepoint) { - unlock_resize_lock(thread); - } - assert(!_resize_lock->owned_by_self(), "Re-size lock not held"); + unlock_resize_lock(thread); return true; } @@ -1113,11 +1097,11 @@ inline void ConcurrentHashTable:: do_scan(Thread* thread, SCAN_FUNC& scan_f) { - assert(!_resize_lock->owned_by_self(), "Re-size lock not held"); + assert(_resize_lock_owner != thread, "Re-size lock held"); lock_resize_lock(thread); do_scan_locked(thread, scan_f); unlock_resize_lock(thread); - assert(!_resize_lock->owned_by_self(), "Re-size lock not held"); + assert(_resize_lock_owner != thread, "Re-size lock held"); } template @@ -1126,12 +1110,11 @@ try_bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f) { if (!try_resize_lock(thread)) { - assert(!_resize_lock->owned_by_self(), "Re-size lock not held"); return false; } do_bulk_delete_locked(thread, eval_f, del_f); unlock_resize_lock(thread); - assert(!_resize_lock->owned_by_self(), "Re-size lock not held"); + assert(_resize_lock_owner != thread, "Re-size lock held"); return true; } @@ -1140,11 +1123,9 @@ inline void ConcurrentHashTable:: bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f) { - assert(!_resize_lock->owned_by_self(), "Re-size lock not held"); lock_resize_lock(thread); do_bulk_delete_locked(thread, eval_f, del_f); unlock_resize_lock(thread); - assert(!_resize_lock->owned_by_self(), "Re-size lock not held"); } template @@ -1155,17 +1136,16 @@ { NumberSeq summary; size_t literal_bytes = 0; - if ((thread->is_VM_thread() && !SafepointSynchronize::is_at_safepoint()) || - (!thread->is_VM_thread() && !try_resize_lock(thread))) { + if (!try_resize_lock(thread)) { st->print_cr("statistics unavailable at this moment"); return; } InternalTable* table = get_table(); - for (size_t bucket_it = 0; bucket_it < _table->_size; bucket_it++) { + for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) { 
ScopedCS cs(thread, this); size_t count = 0; - Bucket* bucket = _table->get_bucket(bucket_it); + Bucket* bucket = table->get_bucket(bucket_it); if (bucket->have_redirect() || bucket->is_locked()) { continue; } @@ -1208,9 +1188,37 @@ st->print_cr("Std. dev. of bucket size: %9.3f", summary.sd()); st->print_cr("Maximum bucket size : %9" PRIuPTR, (size_t)summary.maximum()); - if (!thread->is_VM_thread()) { - unlock_resize_lock(thread); + unlock_resize_lock(thread); +} + +template +inline bool ConcurrentHashTable:: + try_move_nodes_to(Thread* thread, ConcurrentHashTable* to_cht) +{ + if (!try_resize_lock(thread)) { + return false; } + assert(_new_table == NULL, "Must be NULL"); + for (size_t bucket_it = 0; bucket_it < _table->_size; bucket_it++) { + Bucket* bucket = _table->get_bucket(bucket_it); + assert(!bucket->have_redirect() && !bucket->is_locked(), "Table must be uncontended"); + while (bucket->first() != NULL) { + Node* move_node = bucket->first(); + bool ok = bucket->cas_first(move_node->next(), move_node); + assert(ok, "Uncontended cas must work"); + bool dead_hash = false; + size_t insert_hash = CONFIG::get_hash(*move_node->value(), &dead_hash); + if (!dead_hash) { + Bucket* insert_bucket = to_cht->get_bucket(insert_hash); + assert(!bucket->have_redirect() && !bucket->is_locked(), "Not bit should be present"); + move_node->set_next(insert_bucket->first()); + ok = insert_bucket->cas_first(move_node, insert_bucket->first()); + assert(ok, "Uncontended cas must work"); + } + } + } + unlock_resize_lock(thread); + return true; } #endif // include guard diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/StringTable.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/StringTable.java deleted file mode 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/StringTable.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
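
Editor's note: try_move_nodes_to() added above works, under the resize lock, by repeatedly unlinking the first node of each source bucket, recomputing its hash via CONFIG::get_hash (skipping entries whose value is already dead), and pushing it onto the head of the matching bucket in the destination table. The following is a compilable sketch of that migration on plain singly-linked buckets; it has no CAS, no resize lock and no liveness check, and rehash_all_nodes_to, Node and bucket_index are illustrative names.

#include <cstddef>
#include <functional>
#include <iostream>
#include <vector>

struct Node {
  int   value;
  Node* next;
};

using Table = std::vector<Node*>;   // one head pointer per bucket, power-of-two sized

static size_t bucket_index(const Table& t, int value) {
  return std::hash<int>()(value) & (t.size() - 1);
}

// Unlink every node from 'from' and re-link it into 'to', recomputing the
// destination bucket from the stored value.
static void rehash_all_nodes_to(Table& from, Table& to) {
  for (Node*& head : from) {
    while (head != nullptr) {
      Node* move_node = head;
      head = move_node->next;                           // cas_first() in the real code
      Node*& dest = to[bucket_index(to, move_node->value)];
      move_node->next = dest;                           // push onto destination bucket head
      dest = move_node;
    }
  }
}

int main() {
  Table from(16, nullptr), to(64, nullptr);
  for (int v : {2, 3, 7}) {
    size_t idx = bucket_index(from, v);
    from[idx] = new Node{v, from[idx]};                 // leaked on exit; just a demo
  }
  rehash_all_nodes_to(from, to);
  size_t count = 0;
  for (Node* head : to)
    for (Node* n = head; n != nullptr; n = n->next) count++;
  std::cout << count << " nodes moved\n";               // prints "3 nodes moved"
  return 0;
}
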
- * - */ - -package sun.jvm.hotspot.memory; - -import java.io.*; -import java.util.*; -import sun.jvm.hotspot.debugger.*; -import sun.jvm.hotspot.oops.*; -import sun.jvm.hotspot.types.*; -import sun.jvm.hotspot.runtime.*; -import sun.jvm.hotspot.utilities.*; - -public class StringTable extends sun.jvm.hotspot.utilities.Hashtable { - static { - VM.registerVMInitializedObserver(new Observer() { - public void update(Observable o, Object data) { - initialize(VM.getVM().getTypeDataBase()); - } - }); - } - - private static synchronized void initialize(TypeDataBase db) { - Type type = db.lookupType("StringTable"); - theTableField = type.getAddressField("_the_table"); - } - - // Fields - private static AddressField theTableField; - - // Accessors - public static StringTable getTheTable() { - Address tmp = theTableField.getValue(); - return (StringTable) VMObjectFactory.newObject(StringTable.class, tmp); - } - - public StringTable(Address addr) { - super(addr); - } - - public interface StringVisitor { - public void visit(Instance string); - } - - public void stringsDo(StringVisitor visitor) { - ObjectHeap oh = VM.getVM().getObjectHeap(); - int numBuckets = tableSize(); - for (int i = 0; i < numBuckets; i++) { - for (HashtableEntry e = (HashtableEntry) bucket(i); e != null; - e = (HashtableEntry) e.next()) { - Instance s = (Instance)oh.newOop(e.literalValue().addOffsetToAsOopHandle(0)); - visitor.visit(s); - } - } - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java @@ -79,7 +79,6 @@ private Universe universe; private ObjectHeap heap; private SymbolTable symbols; - private StringTable strings; private SystemDictionary dict; private ClassLoaderDataGraph cldGraph; private Threads threads; @@ -655,13 +654,6 @@ return symbols; } - public StringTable getStringTable() { - if (strings == null) { - strings = StringTable.getTheTable(); - } - return strings; - } - public SystemDictionary getSystemDictionary() { if (dict == null) { dict = new SystemDictionary(); diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/HeapSummary.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/HeapSummary.java --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/HeapSummary.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/HeapSummary.java @@ -129,7 +129,6 @@ } System.out.println(); - printInternStringStatistics(); } // Helper methods @@ -258,41 +257,4 @@ return -1; } } - - private void printInternStringStatistics() { - class StringStat implements StringTable.StringVisitor { - private int count; - private long size; - private OopField stringValueField; - - StringStat() { - VM vm = VM.getVM(); - SystemDictionary sysDict = vm.getSystemDictionary(); - InstanceKlass strKlass = sysDict.getStringKlass(); - // String has a field named 'value' of type 'byte[]'. - stringValueField = (OopField) strKlass.findField("value", "[B"); - } - - private long stringSize(Instance instance) { - // We include String content in size calculation. 
- return instance.getObjectSize() + - stringValueField.getValue(instance).getObjectSize(); - } - - public void visit(Instance str) { - count++; - size += stringSize(str); - } - - public void print() { - System.out.println(count + - " interned Strings occupying " + size + " bytes."); - } - } - - StringStat stat = new StringStat(); - StringTable strTable = VM.getVM().getStringTable(); - strTable.stringsDo(stat); - stat.print(); - } } diff --git a/test/hotspot/gtest/utilities/test_concurrentHashtable.cpp b/test/hotspot/gtest/utilities/test_concurrentHashtable.cpp --- a/test/hotspot/gtest/utilities/test_concurrentHashtable.cpp +++ b/test/hotspot/gtest/utilities/test_concurrentHashtable.cpp @@ -265,6 +265,40 @@ delete cht; } +struct ChtCountScan { + size_t _count; + ChtCountScan() : _count(0) {} + bool operator()(uintptr_t* val) { + _count++; + return true; /* continue scan */ + } +}; + +static void cht_move_to(Thread* thr) { + uintptr_t val1 = 0x2; + uintptr_t val2 = 0xe0000002; + uintptr_t val3 = 0x3; + SimpleTestLookup stl1(val1), stl2(val2), stl3(val3); + SimpleTestTable* from_cht = new SimpleTestTable(); + EXPECT_TRUE(from_cht->insert(thr, stl1, val1)) << "Insert unique value failed."; + EXPECT_TRUE(from_cht->insert(thr, stl2, val2)) << "Insert unique value failed."; + EXPECT_TRUE(from_cht->insert(thr, stl3, val3)) << "Insert unique value failed."; + + SimpleTestTable* to_cht = new SimpleTestTable(); + EXPECT_TRUE(from_cht->try_move_nodes_to(thr, to_cht)) << "Moving nodes to new table failed"; + + ChtCountScan scan_old; + EXPECT_TRUE(from_cht->try_scan(thr, scan_old)) << "Scanning table should work."; + EXPECT_EQ(scan_old._count, (size_t)0) << "All items should be moved"; + + ChtCountScan scan_new; + EXPECT_TRUE(to_cht->try_scan(thr, scan_new)) << "Scanning table should work."; + EXPECT_EQ(scan_new._count, (size_t)3) << "All items should be moved"; + EXPECT_TRUE(to_cht->get_copy(thr, stl1) == val1) << "Getting an inserted value should work."; + EXPECT_TRUE(to_cht->get_copy(thr, stl2) == val2) << "Getting an inserted value should work."; + EXPECT_TRUE(to_cht->get_copy(thr, stl3) == val3) << "Getting an inserted value should work."; +} + static void cht_grow(Thread* thr) { uintptr_t val = 0x2; uintptr_t val2 = 0x22; @@ -371,6 +405,10 @@ nomt_test_doer(cht_scan); } +TEST_VM(ConcurrentHashTable, basic_move_to) { + nomt_test_doer(cht_move_to); +} + TEST_VM(ConcurrentHashTable, basic_grow) { nomt_test_doer(cht_grow); } # HG changeset patch # User rehn # Date 1528095518 -7200 # Mon Jun 04 08:58:38 2018 +0200 # Node ID 2de3e03b224ce7709a90e26833475f253ae7dd20 # Parent cd57d4d6515ef166e633aae233fa27bf336ae841 imported patch 8195097-stringtable-robbin diff --git a/src/hotspot/share/classfile/stringTable.cpp b/src/hotspot/share/classfile/stringTable.cpp --- a/src/hotspot/share/classfile/stringTable.cpp +++ b/src/hotspot/share/classfile/stringTable.cpp @@ -434,19 +434,6 @@ _par_state_string->oops_do(f); } -void StringTable::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) { - assert(is_alive != NULL, "No closure"); - StringTableIsAliveCounter stiac(is_alive); - weak_storage()->weak_oops_do(is_alive, f); - StringTable::the_table()->items_to_clean(stiac._count); - StringTable::the_table()->check_concurrent_work(); -} - -void StringTable::weak_oops_do(OopClosure* f) { - assert(f != NULL, "No closure"); - weak_storage()->weak_oops_do(f); -} - struct StringTableDeleteCheck { long _count; long _item; diff --git a/src/hotspot/share/classfile/stringTable.hpp 
b/src/hotspot/share/classfile/stringTable.hpp --- a/src/hotspot/share/classfile/stringTable.hpp +++ b/src/hotspot/share/classfile/stringTable.hpp @@ -117,23 +117,19 @@ static void unlink(BoolObjectClosure* cl) { unlink_or_oops_do(cl); } - static void unlink_or_oops_do(BoolObjectClosure* is_alive, - OopClosure* f = NULL, int* processed = NULL, - int* removed = NULL); + static void unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f = NULL, + int* processed = NULL, int* removed = NULL); // Serially invoke "f->do_oop" on the locations of all oops in the table. static void oops_do(OopClosure* f); // Possibly parallel versions of the above static void possibly_parallel_unlink( - OopStorage::ParState* - _par_state_string, BoolObjectClosure* cl, int* processed, int* removed); + OopStorage::ParState* par_state_string, + BoolObjectClosure* cl, int* processed, int* removed); static void possibly_parallel_oops_do( - OopStorage::ParState* - _par_state_string,OopClosure* f); - - static void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f); - static void weak_oops_do(OopClosure* f); + OopStorage::ParState* par_state_string, + OopClosure* f); // Probing static oop lookup(Symbol* symbol); @@ -147,7 +143,7 @@ // Rehash the string table if it gets out of balance static void rehash_table(); static bool needs_rehashing() - { return StringTable::the_table()->_needs_rehashing; } + { return StringTable::the_table()->_needs_rehashing; } // Sharing oop lookup_shared(jchar* name, int len, unsigned int hash); # HG changeset patch # User rehn # Date 1528124766 -7200 # Mon Jun 04 17:06:06 2018 +0200 # Node ID 16ccb4b668577fcf300aeb00c63ba55c8450058d # Parent 2de3e03b224ce7709a90e26833475f253ae7dd20 [mq]: 8195097-stringtable-v2 diff --git a/src/hotspot/share/classfile/stringTable.cpp b/src/hotspot/share/classfile/stringTable.cpp --- a/src/hotspot/share/classfile/stringTable.cpp +++ b/src/hotspot/share/classfile/stringTable.cpp @@ -55,8 +55,6 @@ // We prefer short chains of avg 2 #define PREF_AVG_LIST_LEN 2 -// We start with same old size, consider to reduce this -#define START_SIZE 16 // 2^24 is max size #define END_SIZE 24 // If a chain gets to 32 something might be wrong @@ -114,7 +112,7 @@ } }; -class StringTableLookupJchar { +class StringTableLookupJchar : StackObj { private: Thread* _thread; uintx _hash; @@ -178,13 +176,22 @@ } }; +static size_t nearest_pow_2(uintx val) { + size_t ret; + for (ret = 1; ((size_t)1 << ret) < val; ++ret); + return ret; +} + StringTable::StringTable() : _local_table(NULL), _current_size(0), _has_work(0), _needs_rehashing(false), _weak_handles(NULL), _items(0), _uncleaned_items(0) { _weak_handles = new OopStorage("StringTable weak", StringTableWeakAlloc_lock, StringTableWeakActive_lock); - _local_table = new StringTableHash(START_SIZE, END_SIZE, REHASH_LEN); - _current_size = ((size_t)1) << START_SIZE; + size_t start_size_log_2 = nearest_pow_2(StringTableSize); + _current_size = ((size_t)1) << start_size_log_2; + log_trace(stringtable)("Start size: " SIZE_FORMAT " (" SIZE_FORMAT ")", + _current_size, start_size_log_2); + _local_table = new StringTableHash(start_size_log_2, END_SIZE, REHASH_LEN); } size_t StringTable::item_added() { @@ -212,6 +219,18 @@ return (_uncleaned_items*1.0)/_current_size; } +size_t StringTable::table_size(Thread* thread) { + return ((size_t)(1)) << _local_table->get_size_log2(thread != NULL ? 
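
Editor's note: the nearest_pow_2() helper added above returns the smallest exponent whose power of two is at least val; despite the name it yields a log2, which is what the StringTableHash constructor takes as its start size. Below is a standalone check of the values it produces for a few plausible -XX:StringTableSize settings (the loop is copied from the patch; the sample sizes are illustrative).

#include <cstddef>
#include <cstdint>
#include <iostream>

// Same loop as the patch: smallest ret with (1 << ret) >= val, minimum 1.
static size_t nearest_pow_2(uint64_t val) {
  size_t ret;
  for (ret = 1; ((uint64_t)1 << ret) < val; ++ret);
  return ret;
}

int main() {
  for (uint64_t size : {1009ull, 60013ull, 65536ull, 100000ull}) {
    size_t log2 = nearest_pow_2(size);
    std::cout << "StringTableSize=" << size
              << " -> log2=" << log2
              << " -> buckets=" << ((uint64_t)1 << log2) << "\n";
  }
  return 0;
}
// 1009 -> 1024, 60013 -> 65536, 65536 -> 65536, 100000 -> 131072
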
thread + : Thread::current()); +} + +void StringTable::trigger_concurrent_work() { + MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag); + the_table()->_has_work = true; + Service_lock->notify_all(); +} + +// Probing oop StringTable::lookup(Symbol* symbol) { ResourceMark rm; int length; @@ -228,7 +247,7 @@ if (StringTable::_alt_hash) { hash = hash_string(name, len, true); } - return StringTable::the_table()->do_lookup( name, len, hash); + return StringTable::the_table()->do_lookup(name, len, hash); } class StringTableGet : public StackObj { @@ -258,74 +277,7 @@ return stg.get_res_oop(); } -class StringTableCreateEntry : public StackObj { - private: - Thread* _thread; - Handle _return; - Handle _store; - public: - StringTableCreateEntry(Thread* thread, Handle store) - : _thread(thread), _store(store) {} - - WeakHandle operator()() { // No dups found - WeakHandle wh = - WeakHandle::create(_store); - return wh; - } - void operator()(bool inserted, WeakHandle* val) { - oop result = val->resolve(); - assert(result != NULL, "Result should be reachable"); - _return = Handle(_thread, result); - } - oop get_return() const { - return _return(); - } -}; - -oop StringTable::intern(Handle string_or_null_h, jchar* name, int len, TRAPS) { - // shared table always uses java_lang_String::hash_code - unsigned int hash = java_lang_String::hash_code(name, len); - oop found_string = StringTable::the_table()->lookup_shared(name, len, hash); - if (found_string != NULL) { - return found_string; - } - if (StringTable::_alt_hash) { - hash = hash_string(name, len, true); - } - return StringTable::the_table()->do_intern(string_or_null_h, name, len, - hash, CHECK_NULL); -} - -oop StringTable::do_intern(Handle string_or_null_h, jchar* name, - int len, uintx hash, TRAPS) { - HandleMark hm(THREAD); // cleanup strings created - Handle string_h; - - if (!string_or_null_h.is_null()) { - string_h = string_or_null_h; - } else { - string_h = java_lang_String::create_from_unicode(name, len, CHECK_NULL); - } - - // Deduplicate the string before it is interned. Note that we should never - // deduplicate a string after it has been interned. Doing so will counteract - // compiler optimizations done on e.g. interned string literals. - Universe::heap()->deduplicate_string(string_h()); - - assert(java_lang_String::equals(string_h(), name, len), - "string must be properly initialized"); - assert(len == java_lang_String::length(string_h()), "Must be same length"); - StringTableLookupOop lookup(THREAD, hash, string_h); - StringTableCreateEntry stc(THREAD, string_h); - - bool rehash_warning; - _local_table->get_insert_lazy(THREAD, lookup, stc, stc, &rehash_warning); - if (rehash_warning) { - _needs_rehashing = true; - } - return stc.get_return(); -} - +// Interning oop StringTable::intern(Symbol* symbol, TRAPS) { if (symbol == NULL) return NULL; ResourceMark rm(THREAD); @@ -358,11 +310,75 @@ return result; } -size_t StringTable::table_size(Thread* thread) { - return ((size_t)(1)) << _local_table->get_size_log2(thread != NULL ? 
thread - : Thread::current()); +oop StringTable::intern(Handle string_or_null_h, jchar* name, int len, TRAPS) { + // shared table always uses java_lang_String::hash_code + unsigned int hash = java_lang_String::hash_code(name, len); + oop found_string = StringTable::the_table()->lookup_shared(name, len, hash); + if (found_string != NULL) { + return found_string; + } + if (StringTable::_alt_hash) { + hash = hash_string(name, len, true); + } + return StringTable::the_table()->do_intern(string_or_null_h, name, len, + hash, CHECK_NULL); } +class StringTableCreateEntry : public StackObj { + private: + Thread* _thread; + Handle _return; + Handle _store; + public: + StringTableCreateEntry(Thread* thread, Handle store) + : _thread(thread), _store(store) {} + + WeakHandle operator()() { // No dups found + WeakHandle wh = + WeakHandle::create(_store); + return wh; + } + void operator()(bool inserted, WeakHandle* val) { + oop result = val->resolve(); + assert(result != NULL, "Result should be reachable"); + _return = Handle(_thread, result); + } + oop get_return() const { + return _return(); + } +}; + +oop StringTable::do_intern(Handle string_or_null_h, jchar* name, + int len, uintx hash, TRAPS) { + HandleMark hm(THREAD); // cleanup strings created + Handle string_h; + + if (!string_or_null_h.is_null()) { + string_h = string_or_null_h; + } else { + string_h = java_lang_String::create_from_unicode(name, len, CHECK_NULL); + } + + // Deduplicate the string before it is interned. Note that we should never + // deduplicate a string after it has been interned. Doing so will counteract + // compiler optimizations done on e.g. interned string literals. + Universe::heap()->deduplicate_string(string_h()); + + assert(java_lang_String::equals(string_h(), name, len), + "string must be properly initialized"); + assert(len == java_lang_String::length(string_h()), "Must be same length"); + StringTableLookupOop lookup(THREAD, hash, string_h); + StringTableCreateEntry stc(THREAD, string_h); + + bool rehash_warning; + _local_table->get_insert_lazy(THREAD, lookup, stc, stc, &rehash_warning); + if (rehash_warning) { + _needs_rehashing = true; + } + return stc.get_return(); +} + +// GC support class StringTableIsAliveCounter : public BoolObjectClosure { BoolObjectClosure* _real_boc; public: @@ -380,12 +396,6 @@ } }; -void StringTable::trigger_concurrent_work() { - MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag); - the_table()->_has_work = true; - Service_lock->notify_all(); -} - void StringTable::unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int* processed, int* removed) { DoNothingClosure dnc; @@ -434,30 +444,7 @@ _par_state_string->oops_do(f); } -struct StringTableDeleteCheck { - long _count; - long _item; - StringTableDeleteCheck() : _count(0), _item(0) {} - bool operator()(WeakHandle* val) { - ++_item; - oop tmp = val->peek(); - if (tmp == NULL) { - ++_count; - return true; - } else { - return false; - } - } -}; - -struct StringTableDoDelete { - long _count; - StringTableDoDelete() : _count(0) {} - void operator()(WeakHandle* val) { - ++_count; - } -}; - +// Concurrent work void StringTable::grow(JavaThread* jt) { StringTableHash::GrowTask gt(_local_table); if (!gt.prepare(jt)) { @@ -479,6 +466,30 @@ log_debug(stringtable)("Growed to size:" SIZE_FORMAT, _current_size); } +struct StringTableDoDelete : StackObj { + long _count; + StringTableDoDelete() : _count(0) {} + void operator()(WeakHandle* val) { + ++_count; + } +}; + +struct StringTableDeleteCheck : StackObj { + long _count; + long 
_item; + StringTableDeleteCheck() : _count(0), _item(0) {} + bool operator()(WeakHandle* val) { + ++_item; + oop tmp = val->peek(); + if (tmp == NULL) { + ++_count; + return true; + } else { + return false; + } + } +}; + void StringTable::clean_dead_entries(JavaThread* jt) { StringTableHash::BulkDeleteTask bdt(_local_table); if (!bdt.prepare(jt)) { @@ -513,26 +524,26 @@ if (_has_work) { return; } - double fact = StringTable::get_load_factor(); - double dead_fact = StringTable::get_dead_factor(); + double load_factor = StringTable::get_load_factor(); + double dead_factor = StringTable::get_dead_factor(); // We should clean/resize if we have more dead than alive, // more items than preferred load factor or // more dead items than water mark. - if ((dead_fact > fact) || - (fact > PREF_AVG_LIST_LEN) || - (dead_fact > CLEAN_DEAD_HIGH_WATER_MARK)) { + if ((dead_factor > load_factor) || + (load_factor > PREF_AVG_LIST_LEN) || + (dead_factor > CLEAN_DEAD_HIGH_WATER_MARK)) { log_debug(stringtable)("Concurrent work triggered, live factor:%g dead factor:%g", - fact, dead_fact); + load_factor, dead_factor); trigger_concurrent_work(); } } void StringTable::concurrent_work(JavaThread* jt) { _has_work = false; - double fact = get_load_factor(); - log_debug(stringtable, perf)("Concurrent work, live factor: %g", fact); + double load_factor = get_load_factor(); + log_debug(stringtable, perf)("Concurrent work, live factor: %g", load_factor); // We prefer growing, since that also removes dead items - if (fact > PREF_AVG_LIST_LEN && !_local_table->is_max_size_reached()) { + if (load_factor > PREF_AVG_LIST_LEN && !_local_table->is_max_size_reached()) { grow(jt); } else { clean_dead_entries(jt); @@ -543,6 +554,7 @@ StringTable::the_table()->concurrent_work(jt); } +// Rehash bool StringTable::do_rehash() { if (!_local_table->is_safepoint_safe()) { return false; @@ -600,12 +612,7 @@ StringTable::the_table()->try_rehash_table(); } -oop StringTable::lookup_shared(jchar* name, int len, unsigned int hash) { - assert(hash == java_lang_String::hash_code(name, len), - "hash must be computed using java_lang_String::hash_code"); - return _shared_table.lookup((const char*)name, hash, len); -} - +// Statistics static int literal_size(oop obj) { // NOTE: this would over-count if (pre-JDK8) // java_lang_Class::has_offset_field() is true and the String.value array is @@ -620,7 +627,7 @@ } } -struct SizeFunc { +struct SizeFunc : StackObj { size_t operator()(WeakHandle* val) { oop s = val->peek(); if (s == NULL) { @@ -637,7 +644,67 @@ _local_table->statistics_to(Thread::current(), sz, st, table_name); } -class PrintString { +// Verification +class VerifyStrings : StackObj { + public: + bool operator()(WeakHandle* val) { + oop s = val->peek(); + if (s != NULL) { + assert(java_lang_String::length(s) >= 0, "Length on string must work."); + } + return true; + }; +}; + +// This verification is part of Universe::verify() and needs to be quick. 
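
Editor's note: check_concurrent_work() above compares two ratios over the current bucket count: load_factor, the number of items per bucket (average chain length), and dead_factor, the number of not-yet-cleaned items per bucket. Per the patch's own comment, work is triggered when there are more dead than alive, when the load exceeds the preferred average chain length (PREF_AVG_LIST_LEN), or when the dead ratio passes the water mark (CLEAN_DEAD_HIGH_WATER_MARK). A standalone rendering of that decision; the constants mirror stringTable.cpp and the sample counts are illustrative.

#include <cstddef>
#include <iostream>

const double PREF_AVG_LIST_LEN = 2.0;            // values as in stringTable.cpp
const double CLEAN_DEAD_HIGH_WATER_MARK = 0.5;

static bool needs_concurrent_work(size_t items, size_t uncleaned, size_t buckets) {
  double load_factor = (double)items / buckets;       // average chain length
  double dead_factor = (double)uncleaned / buckets;   // dead entries per bucket
  return (dead_factor > load_factor) ||
         (load_factor > PREF_AVG_LIST_LEN) ||
         (dead_factor > CLEAN_DEAD_HIGH_WATER_MARK);
}

int main() {
  // 65536 buckets, 100k items, few dead: average chain ~1.5 -> nothing to do yet.
  std::cout << needs_concurrent_work(100000, 1000, 65536) << "\n";   // 0
  // Same table, 200k items: average chain > 2 -> grow (growing also drops dead items).
  std::cout << needs_concurrent_work(200000, 1000, 65536) << "\n";   // 1
  // Dead entries dominate after a GC -> clean even though the chains are short.
  std::cout << needs_concurrent_work(20000, 40000, 65536) << "\n";   // 1
  return 0;
}
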
+void StringTable::verify() { + Thread* thr = Thread::current(); + VerifyStrings vs; + if (!the_table()->_local_table->try_scan(thr, vs)) { + log_info(stringtable)("verify unavailable at this moment"); + } +} + +// Verification and comp +class VerifyCompStrings : StackObj { + GrowableArray* _oops; + public: + size_t _errors; + VerifyCompStrings(GrowableArray* oops) : _oops(oops), _errors(0) {} + bool operator()(WeakHandle* val) { + oop s = val->resolve(); + if (s == NULL) { + return true; + } + int len = _oops->length(); + for (int i = 0; i < len; i++) { + bool eq = java_lang_String::equals(s, _oops->at(i)); + assert(!eq, "Duplicate strings"); + if (eq) { + _errors++; + } + } + _oops->push(s); + return true; + }; +}; + +size_t StringTable::verify_and_compare_entries() { + Thread* thr = Thread::current(); + GrowableArray* oops = + new (ResourceObj::C_HEAP, mtInternal) + GrowableArray((int)the_table()->_current_size, true); + + VerifyCompStrings vcs(oops); + if (!the_table()->_local_table->try_scan(thr, vcs)) { + log_info(stringtable)("verify unavailable at this moment"); + } + delete oops; + return vcs._errors; +} + +// Dumping +class PrintString : StackObj { Thread* _thr; outputStream* _st; public: @@ -688,26 +755,6 @@ } } -class VerifyStrings { - public: - bool operator()(WeakHandle* val) { - oop s = val->peek(); - if (s != NULL) { - assert(java_lang_String::length(s) >= 0, "Length on string must work."); - } - return true; - }; -}; - -// This verification is part of Universe::verify() and needs to be quick. -void StringTable::verify() { - Thread* thr = Thread::current(); - VerifyStrings vs; - if (!the_table()->_local_table->try_scan(thr, vs)) { - log_info(stringtable)("verify unavailable at this moment"); - } -} - // Utility for dumping strings StringtableDCmd::StringtableDCmd(outputStream* output, bool heap) : DCmdWithParser(output, heap), @@ -733,8 +780,14 @@ } } +// Sharing #if INCLUDE_CDS_JAVA_HEAP -// Sharing +oop StringTable::lookup_shared(jchar* name, int len, unsigned int hash) { + assert(hash == java_lang_String::hash_code(name, len), + "hash must be computed using java_lang_String::hash_code"); + return _shared_table.lookup((const char*)name, hash, len); +} + oop StringTable::create_archived_string(oop s, Thread* THREAD) { assert(DumpSharedSpaces, "this function is only used with -Xshare:dump"); @@ -755,9 +808,9 @@ return new_s; } -struct CopyArchive { +struct CopyToArchive : StackObj { CompactStringTableWriter* _writer; - CopyArchive(CompactStringTableWriter* writer) : _writer(writer) {} + CopyToArchive(CompactStringTableWriter* writer) : _writer(writer) {} bool operator()(WeakHandle* val) { oop s = val->peek(); if (s == NULL) { @@ -781,10 +834,10 @@ } }; -void StringTable::copy_shared_string(CompactStringTableWriter* writer) { +void StringTable::copy_shared_string_table(CompactStringTableWriter* writer) { assert(MetaspaceShared::is_heap_object_archiving_allowed(), "must be"); - CopyArchive copy(writer); + CopyToArchive copy(writer); StringTable::the_table()->_local_table->do_scan(Thread::current(), copy); } @@ -798,7 +851,7 @@ &MetaspaceShared::stats()->string); // Copy the interned strings into the "string space" within the java heap - copy_shared_string(&writer); + copy_shared_string_table(&writer); writer.dump(&_shared_table); } @@ -817,5 +870,4 @@ void StringTable::shared_oops_do(OopClosure* f) { _shared_table.oops_do(f); } - #endif //INCLUDE_CDS_JAVA_HEAP diff --git a/src/hotspot/share/classfile/stringTable.hpp b/src/hotspot/share/classfile/stringTable.hpp --- 
a/src/hotspot/share/classfile/stringTable.hpp +++ b/src/hotspot/share/classfile/stringTable.hpp @@ -71,9 +71,9 @@ OopStorage* _weak_handles; volatile size_t _items; - DEFINE_PAD_MINUS_SIZE(1, 64, sizeof(volatile size_t)); + DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t)); volatile size_t _uncleaned_items; - DEFINE_PAD_MINUS_SIZE(2, 64, sizeof(volatile size_t)); + DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t)); double get_load_factor(); double get_dead_factor(); @@ -146,19 +146,21 @@ { return StringTable::the_table()->_needs_rehashing; } // Sharing - oop lookup_shared(jchar* name, int len, unsigned int hash); + private: + oop lookup_shared(jchar* name, int len, unsigned int hash) NOT_CDS_JAVA_HEAP_RETURN_(NULL); + static void copy_shared_string_table(CompactStringTableWriter* ch_table) NOT_CDS_JAVA_HEAP_RETURN; + public: static oop create_archived_string(oop s, Thread* THREAD); static void set_shared_string_mapped() { _shared_string_mapped = true; } static bool shared_string_mapped() { return _shared_string_mapped; } static void shared_oops_do(OopClosure* f) NOT_CDS_JAVA_HEAP_RETURN; - static void copy_shared_string(CompactStringTableWriter* ch_table) - NOT_CDS_JAVA_HEAP_RETURN; static void write_to_archive() NOT_CDS_JAVA_HEAP_RETURN; static void serialize(SerializeClosure* soc) NOT_CDS_JAVA_HEAP_RETURN; // Jcmd static void dump(outputStream* st, bool verbose=false); // Debugging + static size_t verify_and_compare_entries(); static void verify(); }; diff --git a/src/hotspot/share/gc/shared/genCollectedHeap.cpp b/src/hotspot/share/gc/shared/genCollectedHeap.cpp --- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp +++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp @@ -859,9 +859,9 @@ // from the StringTable are the individual tasks. 
// Either we should be single threaded or have a ParState - assert((scope->n_threads() <= 1) || par_state_string != NULL, "Parallel but not ParState"); + assert((scope->n_threads() <= 1) || par_state_string != NULL, "Parallel but no ParState"); - if (scope->n_threads() > 1 && par_state_string != NULL) { + if (scope->n_threads() > 1) { StringTable::possibly_parallel_oops_do(par_state_string, root_closure); } else { StringTable::oops_do(root_closure); diff --git a/src/hotspot/share/runtime/globals.hpp b/src/hotspot/share/runtime/globals.hpp --- a/src/hotspot/share/runtime/globals.hpp +++ b/src/hotspot/share/runtime/globals.hpp @@ -2545,8 +2545,9 @@ "Relax the access control checks in the verifier") \ \ product(uintx, StringTableSize, defaultStringTableSize, \ - "Number of buckets in the interned String table") \ - range(minimumStringTableSize, 111*defaultStringTableSize) \ + "Number of buckets in the interned String table " \ + "(will be rounded to nearest higher power of 2)") \ + range(minimumStringTableSize, 16777216ul) \ \ experimental(uintx, SymbolTableSize, defaultSymbolTableSize, \ "Number of buckets in the JVM internal Symbol table") \ diff --git a/src/hotspot/share/runtime/java.cpp b/src/hotspot/share/runtime/java.cpp --- a/src/hotspot/share/runtime/java.cpp +++ b/src/hotspot/share/runtime/java.cpp @@ -523,6 +523,14 @@ BeforeExit_lock->notify_all(); } + if (VerifyStringTableAtExit) { + size_t fail_cnt = StringTable::verify_and_compare_entries(); + if (fail_cnt != 0) { + tty->print_cr("ERROR: fail_cnt=" SIZE_FORMAT, fail_cnt); + guarantee(fail_cnt == 0, "unexpected StringTable verification failures"); + } + } + #undef BEFORE_EXIT_NOT_RUN #undef BEFORE_EXIT_RUNNING #undef BEFORE_EXIT_DONE diff --git a/src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp b/src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp --- a/src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp +++ b/src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp @@ -25,6 +25,7 @@ #ifndef SHARE_UTILITIES_CONCURRENT_HASH_TABLE_TASKS_INLINE_HPP #define SHARE_UTILITIES_CONCURRENT_HASH_TABLE_TASKS_INLINE_HPP +#include "utilities/globalDefinitions.hpp" #include "utilities/concurrentHashTable.inline.hpp" // This inline file contains BulkDeleteTask and GrowTasks which are both bucket @@ -63,6 +64,7 @@ // Calculate starting values. void setup() { _size_log2 = _cht->_table->_log2_size; + _task_size_log2 = MIN2(_task_size_log2, _size_log2); size_t tmp = _size_log2 > _task_size_log2 ? _size_log2 - _task_size_log2 : 0; _stop_task = (((size_t)1) << tmp); diff --git a/src/hotspot/share/utilities/globalDefinitions.hpp b/src/hotspot/share/utilities/globalDefinitions.hpp --- a/src/hotspot/share/utilities/globalDefinitions.hpp +++ b/src/hotspot/share/utilities/globalDefinitions.hpp @@ -424,8 +424,8 @@ //---------------------------------------------------------------------------------------------------- // Default and minimum StringTableSize values -const int defaultStringTableSize = NOT_LP64(1009) LP64_ONLY(60013); -const int minimumStringTableSize = 1009; +const int defaultStringTableSize = NOT_LP64(1024) LP64_ONLY(65536); +const int minimumStringTableSize = 128; const int defaultSymbolTableSize = 20011; const int minimumSymbolTableSize = 1009;
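
Editor's note: the concurrentHashTableTasks.inline.hpp hunk above clamps the per-task chunk (as a log2) to the table's own log2 size in setup(), so a table smaller than one chunk still ends up with a single task whose nominal chunk matches the table, which matters now that the minimum StringTableSize drops to 128 and the flag is rounded up to a power of two. The arithmetic, in a standalone form; the 2^12 default chunk here is an assumption for illustration, not taken from the patch.

#include <algorithm>
#include <cstddef>
#include <iostream>

// Mirrors the setup() shown above: split a table of 2^size_log2 buckets into
// claimable chunks of 2^task_size_log2 buckets, clamping the chunk so it never
// exceeds the table itself.
static void plan(size_t size_log2, size_t task_size_log2) {
  task_size_log2 = std::min(task_size_log2, size_log2);   // the added MIN2()
  size_t stop_task = (size_t)1 << (size_log2 - task_size_log2);
  std::cout << "buckets=" << ((size_t)1 << size_log2)
            << " chunk="  << ((size_t)1 << task_size_log2)
            << " tasks="  << stop_task << "\n";
}

int main() {
  plan(16, 12);   // 65536 buckets -> 16 tasks of 4096 buckets
  plan(9, 12);    // 512 buckets: chunk clamped to 512 -> 1 task
  return 0;
}
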