1 /* 2 * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/altHashing.hpp" 27 #include "classfile/dictionary.hpp" 28 #include "classfile/javaClasses.inline.hpp" 29 #include "classfile/moduleEntry.hpp" 30 #include "classfile/packageEntry.hpp" 31 #include "classfile/protectionDomainCache.hpp" 32 #include "classfile/stringTable.hpp" 33 #include "memory/allocation.inline.hpp" 34 #include "memory/filemap.hpp" 35 #include "memory/resourceArea.hpp" 36 #include "oops/oop.inline.hpp" 37 #include "runtime/safepoint.hpp" 38 #include "utilities/dtrace.hpp" 39 #include "utilities/hashtable.hpp" 40 #include "utilities/hashtable.inline.hpp" 41 #include "utilities/numberSeq.hpp" 42 43 44 // This hashtable is implemented as an open hash table with a fixed number of buckets. 
45 46 template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry_free_list() { 47 BasicHashtableEntry<F>* entry = NULL; 48 if (_free_list != NULL) { 49 entry = _free_list; 50 _free_list = _free_list->next(); 51 } 52 return entry; 53 } 54 55 // HashtableEntrys are allocated in blocks to reduce the space overhead. 56 template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry(unsigned int hashValue) { 57 BasicHashtableEntry<F>* entry = new_entry_free_list(); 58 59 if (entry == NULL) { 60 if (_first_free_entry + _entry_size >= _end_block) { 61 int block_size = MIN2(512, MAX2((int)_table_size / 2, (int)_number_of_entries)); 62 int len = _entry_size * block_size; 63 len = 1 << log2_intptr(len); // round down to power of 2 64 assert(len >= _entry_size, ""); 65 _first_free_entry = NEW_C_HEAP_ARRAY2(char, len, F, CURRENT_PC); 66 _end_block = _first_free_entry + len; 67 } 68 entry = (BasicHashtableEntry<F>*)_first_free_entry; 69 _first_free_entry += _entry_size; 70 } 71 72 assert(_entry_size % HeapWordSize == 0, ""); 73 entry->set_hash(hashValue); 74 return entry; 75 } 76 77 78 template <class T, MEMFLAGS F> HashtableEntry<T, F>* Hashtable<T, F>::new_entry(unsigned int hashValue, T obj) { 79 HashtableEntry<T, F>* entry; 80 81 entry = (HashtableEntry<T, F>*)BasicHashtable<F>::new_entry(hashValue); 82 entry->set_literal(obj); 83 return entry; 84 } 85 86 // Check to see if the hashtable is unbalanced. The caller set a flag to 87 // rehash at the next safepoint. If this bucket is 60 times greater than the 88 // expected average bucket length, it's an unbalanced hashtable. 89 // This is somewhat an arbitrary heuristic but if one bucket gets to 90 // rehash_count which is currently 100, there's probably something wrong. 
// Returns true when bucket `count` exceeds rehash_multiple times the average
// bucket length (number_of_entries / table_size); the caller is expected to
// set a flag so the table is rehashed at the next safepoint.
template <class T, MEMFLAGS F> bool RehashableHashtable<T, F>::check_rehash_table(int count) {
  assert(this->table_size() != 0, "underflow");
  if (count > (((double)this->number_of_entries()/(double)this->table_size())*rehash_multiple)) {
    // Set a flag for the next safepoint, which should be at some guaranteed
    // safepoint interval.
    return true;
  }
  return false;
}

// Shared alternate-hashing seed for all RehashableHashtables; 0 means
// "not yet computed".
template <class T, MEMFLAGS F> juint RehashableHashtable<T, F>::_seed = 0;

// Create a new table and using alternate hash code, populate the new table
// with the existing elements. This can be used to change the hash code
// and could in the future change the size of the table.
template <class T, MEMFLAGS F> void RehashableHashtable<T, F>::move_to(RehashableHashtable<T, F>* new_table) {

  // Initialize the global seed for hashing.
  _seed = AltHashing::compute_seed();
  assert(seed() != 0, "shouldn't be zero");

  // Remember the entry count so we can verify nothing was lost in transit.
  int saved_entry_count = this->number_of_entries();

  // Iterate through the table and move each entry into the new table.
  // NOTE(review): the old table's buckets are walked using
  // new_table->table_size() — this presumably assumes both tables have the
  // same size (the comment above only anticipates resizing in the future);
  // confirm before allowing differing sizes.
  for (int i = 0; i < new_table->table_size(); ++i) {
    for (HashtableEntry<T, F>* p = this->bucket(i); p != NULL; ) {
      // Save the successor now: unlink/add below rewrites p's next pointer.
      HashtableEntry<T, F>* next = p->next();
      T string = p->literal();
      // Use alternate hashing algorithm on the symbol in the first table
      unsigned int hashValue = string->new_hash(seed());
      // Get a new index relative to the new table (can also change size)
      int index = new_table->hash_to_index(hashValue);
      p->set_hash(hashValue);
      // Keep the shared bit in the Hashtable entry to indicate that this entry
      // can't be deleted. The shared bit is the LSB in the _next field so
      // walking the hashtable past these entries requires
      // BasicHashtableEntry::make_ptr() call.
      bool keep_shared = p->is_shared();
      this->unlink_entry(p);
      new_table->add_entry(index, p);
      if (keep_shared) {
        // add_entry overwrote _next; restore the shared (LSB) flag.
        p->set_shared();
      }
      p = next;
    }
  }
  // give the new table the free list as well
  new_table->copy_freelist(this);
  assert(new_table->number_of_entries() == saved_entry_count, "lost entry on dictionary copy?");

  // Destroy memory used by the buckets in the hashtable. The memory
  // for the elements has been used in a new table and is not
  // destroyed. The memory reuse will benefit resizing the SystemDictionary
  // to avoid a memory allocation spike at safepoint.
  BasicHashtable<F>::free_buckets();
}

// Release the bucket array (but never entry memory, which may still be
// linked into another table — see move_to above).
template <MEMFLAGS F> void BasicHashtable<F>::free_buckets() {
  if (NULL != _buckets) {
    // Don't delete the buckets in the shared space. They aren't
    // allocated by os::malloc
    if (!UseSharedSpaces ||
        !FileMapInfo::current_info()->is_in_shared_space(_buckets)) {
      FREE_C_HEAP_ARRAY(HashtableBucket, _buckets);
    }
    _buckets = NULL;
  }
}

// Thread-locally queue an unlinked entry onto this context's removal list
// (head-insertion; the tail is recorded so the whole list can later be
// spliced onto the table's free list in one step by bulk_free_entries).
template <MEMFLAGS F> void BasicHashtable<F>::BucketUnlinkContext::free_entry(BasicHashtableEntry<F>* entry) {
  entry->set_next(_removed_head);
  _removed_head = entry;
  if (_removed_tail == NULL) {
    _removed_tail = entry;
  }
  _num_removed++;
}

// Splice the context's removal list onto the table's free list and adjust
// the entry count. Safe to call concurrently from multiple threads.
template <MEMFLAGS F> void BasicHashtable<F>::bulk_free_entries(BucketUnlinkContext* context) {
  if (context->_num_removed == 0) {
    // An empty context must not have dangling list pointers.
    assert(context->_removed_head == NULL && context->_removed_tail == NULL,
           "Zero entries in the unlink context, but elements linked from " PTR_FORMAT " to " PTR_FORMAT,
           p2i(context->_removed_head), p2i(context->_removed_tail));
    return;
  }

  // MT-safe add of the list of BasicHashTableEntrys from the context to the
  // free list: point the list's tail at the current free-list head, then CAS
  // the new head in; on failure, retry with the freshly observed head.
  BasicHashtableEntry<F>* current = _free_list;
  while (true) {
    context->_removed_tail->set_next(current);
    BasicHashtableEntry<F>* old = (BasicHashtableEntry<F>*)Atomic::cmpxchg_ptr(context->_removed_head, &_free_list, current);
    if (old == current) {
      break;
    }
    current = old;
  }
  Atomic::add(-context->_num_removed, &_number_of_entries);
}

// Copy the table's entries to the shared space at *top (bounded by end),
// writing a length word first, relocating each bucket's chain in place,
// and finally marking every entry as shared (undeletable).
template <MEMFLAGS F> void BasicHashtable<F>::copy_table(char** top, char* end) {

  // Reserve a slot for the total byte length of the dumped entries;
  // it is filled in after the copy loop below.
  intptr_t *plen = (intptr_t*)(*top);
  *top += sizeof(*plen);

  int i;
  for (i = 0; i < _table_size; ++i) {
    // Walk via entry_addr()/next_addr() so each link can be rewritten to
    // point at the entry's new (shared-space) location as it is copied.
    for (BasicHashtableEntry<F>** p = _buckets[i].entry_addr();
         *p != NULL;
         p = (*p)->next_addr()) {
      if (*top + entry_size() > end) {
        report_out_of_shared_space(SharedMiscData);
      }
      *p = (BasicHashtableEntry<F>*)memcpy(*top, *p, entry_size());
      *top += entry_size();
    }
  }
  *plen = (char*)(*top) - (char*)plen - sizeof(*plen);

  // Set the shared bit.
  for (i = 0; i < _table_size; ++i) {
    for (BasicHashtableEntry<F>* p = bucket(i); p != NULL; p = p->next()) {
      p->set_shared();
    }
  }
}

// Footprint of a Symbol literal, in bytes.
template <class T, MEMFLAGS F> int RehashableHashtable<T, F>::literal_size(Symbol *symbol) {
  return symbol->size() * HeapWordSize;
}

// Footprint of a String literal (the String object plus its value array),
// in bytes.
template <class T, MEMFLAGS F> int RehashableHashtable<T, F>::literal_size(oop oop) {
  // NOTE: this would over-count if (pre-JDK8) java_lang_Class::has_offset_field() is true,
  // and the String.value array is shared by several Strings. However, starting from JDK8,
  // the String.value array is not shared anymore.
  assert(oop != NULL && oop->klass() == SystemDictionary::String_klass(), "only strings are supported");
  return (oop->size() + java_lang_String::value(oop)->size()) * HeapWordSize;
}

// Dump footprint and bucket length statistics
//
// Note: if you create a new subclass of Hashtable<MyNewType, F>, you will need to
// add a new function Hashtable<T, F>::literal_size(MyNewType lit)
template <class T, MEMFLAGS F> void RehashableHashtable<T, F>::dump_table(outputStream* st, const char *table_name) {
  NumberSeq summary;
  int literal_bytes = 0;
  // One NumberSeq sample per bucket: its chain length. Literal bytes are
  // accumulated separately since NumberSeq only tracks the counts.
  for (int i = 0; i < this->table_size(); ++i) {
    int count = 0;
    for (HashtableEntry<T, F>* e = this->bucket(i);
         e != NULL; e = e->next()) {
      count++;
      literal_bytes += literal_size(e->literal());
    }
    summary.add((double)count);
  }
  double num_buckets = summary.num();
  double num_entries = summary.sum();

  int bucket_bytes = (int)num_buckets * sizeof(HashtableBucket<F>);
  int entry_bytes = (int)num_entries * sizeof(HashtableEntry<T, F>);
  int total_bytes = literal_bytes + bucket_bytes + entry_bytes;

  // Guard the divisions: an empty table yields 0 averages, not NaN/inf.
  double bucket_avg = (num_buckets <= 0) ? 0 : (bucket_bytes / num_buckets);
  double entry_avg = (num_entries <= 0) ? 0 : (entry_bytes / num_entries);
  double literal_avg = (num_entries <= 0) ? 0 : (literal_bytes / num_entries);

  st->print_cr("%s statistics:", table_name);
  st->print_cr("Number of buckets       : %9d = %9d bytes, avg %7.3f", (int)num_buckets, bucket_bytes, bucket_avg);
  st->print_cr("Number of entries       : %9d = %9d bytes, avg %7.3f", (int)num_entries, entry_bytes, entry_avg);
  st->print_cr("Number of literals      : %9d = %9d bytes, avg %7.3f", (int)num_entries, literal_bytes, literal_avg);
  st->print_cr("Total footprint         : %9s = %9d bytes", "", total_bytes);
  st->print_cr("Average bucket size     : %9.3f", summary.avg());
  st->print_cr("Variance of bucket size : %9.3f", summary.variance());
  st->print_cr("Std. dev. of bucket size: %9.3f", summary.sd());
  st->print_cr("Maximum bucket size     : %9d", (int)summary.maximum());
}


// Dump the hash table buckets to the shared space at *top (bounded by end):
// a length word, the entry count, then the bucket array itself; _buckets is
// relocated to point at the shared-space copy.
template <MEMFLAGS F> void BasicHashtable<F>::copy_buckets(char** top, char* end) {
  intptr_t len = _table_size * sizeof(HashtableBucket<F>);
  *(intptr_t*)(*top) = len;
  *top += sizeof(intptr_t);

  *(intptr_t*)(*top) = _number_of_entries;
  *top += sizeof(intptr_t);

  if (*top + len > end) {
    report_out_of_shared_space(SharedMiscData);
  }
  _buckets = (HashtableBucket<F>*)memcpy(*top, _buckets, len);
  *top += len;
}


#ifndef PRODUCT

// Debug aid: print every entry's literal, prefixed by its bucket index.
template <class T, MEMFLAGS F> void Hashtable<T, F>::print() {
  ResourceMark rm;

  for (int i = 0; i < BasicHashtable<F>::table_size(); i++) {
    HashtableEntry<T, F>* entry = bucket(i);
    while(entry != NULL) {
      tty->print("%d : ", i);
      entry->literal()->print();
      tty->cr();
      entry = entry->next();
    }
  }
}


// Verify every entry (T must provide verify()), check the element count
// matches, and in debug builds check the table is not badly unbalanced.
template <MEMFLAGS F>
template <class T> void BasicHashtable<F>::verify_table(const char* table_name) {
  int element_count = 0;
  int max_bucket_count = 0;
  for (int index = 0; index < table_size(); index++) {
    int bucket_count = 0;
    for (T* probe = (T*)bucket(index); probe != NULL; probe = probe->next()) {
      probe->verify();
      bucket_count++;
    }
    element_count += bucket_count;
    max_bucket_count = MAX2(max_bucket_count, bucket_count);
  }
  guarantee(number_of_entries() == element_count,
            "Verify of %s failed", table_name);
  DEBUG_ONLY(verify_lookup_length(max_bucket_count, table_name));
}


#endif // PRODUCT

#ifdef ASSERT

// Assert if the longest bucket is 10x longer than the average bucket size.
// Could change back to a warning, but warnings are not noticed.
template <MEMFLAGS F> void BasicHashtable<F>::verify_lookup_length(int max_bucket_count, const char *table_name) {
  log_info(hashtables)("%s max bucket size %d element count %d table size %d", table_name,
                       max_bucket_count, _number_of_entries, _table_size);
  assert (max_bucket_count < ((1 + number_of_entries()/table_size())*10), "Table is unbalanced");
}

#endif


// Explicitly instantiate these types
#if INCLUDE_ALL_GCS
template class Hashtable<nmethod*, mtGC>;
template class HashtableEntry<nmethod*, mtGC>;
template class BasicHashtable<mtGC>;
#endif
template class Hashtable<ConstantPool*, mtClass>;
template class RehashableHashtable<Symbol*, mtSymbol>;
template class RehashableHashtable<oopDesc*, mtSymbol>;
template class Hashtable<Symbol*, mtSymbol>;
template class Hashtable<Klass*, mtClass>;
template class Hashtable<InstanceKlass*, mtClass>;
template class Hashtable<oop, mtClass>;
#if defined(SOLARIS) || defined(CHECK_UNHANDLED_OOPS)
template class Hashtable<oop, mtSymbol>;
template class RehashableHashtable<oop, mtSymbol>;
#endif // SOLARIS || CHECK_UNHANDLED_OOPS
template class Hashtable<oopDesc*, mtSymbol>;
template class Hashtable<Symbol*, mtClass>;
template class HashtableEntry<Symbol*, mtSymbol>;
template class HashtableEntry<Symbol*, mtClass>;
template class HashtableEntry<oop, mtSymbol>;
template class BasicHashtableEntry<mtSymbol>;
template class BasicHashtableEntry<mtCode>;
template class BasicHashtable<mtClass>;
template class BasicHashtable<mtClassShared>;
template class BasicHashtable<mtSymbol>;
template class BasicHashtable<mtCode>;
template class BasicHashtable<mtInternal>;
template class BasicHashtable<mtModule>;
#if INCLUDE_TRACE
template class Hashtable<Symbol*, mtTracing>;
template class HashtableEntry<Symbol*, mtTracing>;
template class BasicHashtable<mtTracing>;
#endif
template class BasicHashtable<mtCompiler>;

// Explicit verify_table instantiations for the entry types that use it.
template void BasicHashtable<mtClass>::verify_table<DictionaryEntry>(char const*);
template void BasicHashtable<mtModule>::verify_table<ModuleEntry>(char const*);
template void BasicHashtable<mtModule>::verify_table<PackageEntry>(char const*);
template void BasicHashtable<mtClass>::verify_table<ProtectionDomainCacheEntry>(char const*);