1 /* 2 * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/altHashing.hpp" 27 #include "classfile/dictionary.hpp" 28 #include "classfile/javaClasses.inline.hpp" 29 #include "classfile/moduleEntry.hpp" 30 #include "classfile/packageEntry.hpp" 31 #include "classfile/placeholders.hpp" 32 #include "classfile/protectionDomainCache.hpp" 33 #include "classfile/stringTable.hpp" 34 #include "logging/log.hpp" 35 #include "memory/allocation.inline.hpp" 36 #include "memory/metaspaceShared.hpp" 37 #include "memory/resourceArea.hpp" 38 #include "oops/oop.inline.hpp" 39 #include "oops/weakHandle.inline.hpp" 40 #include "runtime/safepoint.hpp" 41 #include "utilities/dtrace.hpp" 42 #include "utilities/hashtable.hpp" 43 #include "utilities/hashtable.inline.hpp" 44 #include "utilities/numberSeq.hpp" 45 46 47 // This hashtable is implemented as an open hash table with a fixed number of buckets. 
// Return an entry recycled from the free list, or NULL if the free list is
// empty.  Entries are pushed onto _free_list by bulk_free_entries() below.
template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry_free_list() {
  BasicHashtableEntry<F>* entry = NULL;
  if (_free_list != NULL) {
    entry = _free_list;
    _free_list = _free_list->next();
  }
  return entry;
}

// HashtableEntrys are allocated in blocks to reduce the space overhead.
// A recycled entry from the free list is preferred; otherwise entries are
// bump-allocated from a C-heap block ([_first_free_entry, _end_block)),
// allocating a new block when the current one is exhausted.
template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry(unsigned int hashValue) {
  BasicHashtableEntry<F>* entry = new_entry_free_list();

  if (entry == NULL) {
    if (_first_free_entry + _entry_size >= _end_block) {
      // Size the new block relative to the table: half the bucket count or the
      // current entry count, whichever is larger, clamped to 512 entries.
      int block_size = MIN2(512, MAX2((int)_table_size / 2, (int)_number_of_entries));
      int len = _entry_size * block_size;
      len = 1 << log2_intptr(len); // round down to power of 2
      assert(len >= _entry_size, "");
      _first_free_entry = NEW_C_HEAP_ARRAY2(char, len, F, CURRENT_PC);
      _end_block = _first_free_entry + len;
    }
    entry = (BasicHashtableEntry<F>*)_first_free_entry;
    _first_free_entry += _entry_size;
  }

  assert(_entry_size % HeapWordSize == 0, "");
  entry->set_hash(hashValue);
  return entry;
}


// Allocate a typed entry (via the block allocator above) and install its
// literal.  The caller is responsible for linking it into a bucket.
template <class T, MEMFLAGS F> HashtableEntry<T, F>* Hashtable<T, F>::new_entry(unsigned int hashValue, T obj) {
  HashtableEntry<T, F>* entry;

  entry = (HashtableEntry<T, F>*)BasicHashtable<F>::new_entry(hashValue);
  entry->set_literal(obj);
  return entry;
}

// Version of hashtable entry allocation that allocates in the C heap directly.
// The allocator in blocks is preferable but doesn't have free semantics.
template <class T, MEMFLAGS F> HashtableEntry<T, F>* Hashtable<T, F>::allocate_new_entry(unsigned int hashValue, T obj) {
  HashtableEntry<T, F>* entry = (HashtableEntry<T, F>*) NEW_C_HEAP_ARRAY(char, this->entry_size(), F);

  entry->set_hash(hashValue);
  entry->set_literal(obj);
  entry->set_next(NULL);
  return entry;
}

// Check to see if the hashtable is unbalanced.
// The caller sets a flag to rehash at the next safepoint.  If this bucket is
// 60 times greater than the expected average bucket length, it's an
// unbalanced hashtable.
// This is somewhat an arbitrary heuristic but if one bucket gets to
// rehash_count which is currently 100, there's probably something wrong.

template <class T, MEMFLAGS F> bool RehashableHashtable<T, F>::check_rehash_table(int count) {
  assert(this->table_size() != 0, "underflow");
  if (count > (((double)this->number_of_entries()/(double)this->table_size())*rehash_multiple)) {
    // Set a flag for the next safepoint, which should be at some guaranteed
    // safepoint interval.
    return true;
  }
  return false;
}

// Create a new table and using alternate hash code, populate the new table
// with the existing elements.  This can be used to change the hash code
// and could in the future change the size of the table.

template <class T, MEMFLAGS F> void RehashableHashtable<T, F>::move_to(RehashableHashtable<T, F>* new_table) {

  // Initialize the global seed for hashing.
  _seed = AltHashing::compute_seed();
  assert(seed() != 0, "shouldn't be zero");

  // NOTE(review): saved_entry_count is not read below; presumably kept for a
  // (removed or debug-only) sanity check -- confirm before deleting.
  int saved_entry_count = this->number_of_entries();

  // Iterate through the table and create a new entry for the new table
  for (int i = 0; i < new_table->table_size(); ++i) {
    for (HashtableEntry<T, F>* p = this->bucket(i); p != NULL; ) {
      HashtableEntry<T, F>* next = p->next();
      T string = p->literal();
      // Use alternate hashing algorithm on the symbol in the first table
      unsigned int hashValue = string->new_hash(seed());
      // Get a new index relative to the new table (can also change size)
      int index = new_table->hash_to_index(hashValue);
      p->set_hash(hashValue);
      // Keep the shared bit in the Hashtable entry to indicate that this entry
      // can't be deleted.  The shared bit is the LSB in the _next field so
      // walking the hashtable past these entries requires
      // BasicHashtableEntry::make_ptr() call.
      bool keep_shared = p->is_shared();
      this->unlink_entry(p);
      new_table->add_entry(index, p);
      // add_entry rewrote _next, which holds the shared bit; restore it.
      if (keep_shared) {
        p->set_shared();
      }
      p = next;
    }
  }
  // give the new table the free list as well
  new_table->copy_freelist(this);

  // Destroy memory used by the buckets in the hashtable.  The memory
  // for the elements has been used in a new table and is not
  // destroyed.  The memory reuse will benefit resizing the SystemDictionary
  // to avoid a memory allocation spike at safepoint.
  BasicHashtable<F>::free_buckets();
}

// Release the bucket array unless it lives in the CDS shared space.
template <MEMFLAGS F> void BasicHashtable<F>::free_buckets() {
  if (NULL != _buckets) {
    // Don't delete the buckets in the shared space.  They aren't
    // allocated by os::malloc
    if (!MetaspaceShared::is_in_shared_metaspace(_buckets)) {
      FREE_C_HEAP_ARRAY(HashtableBucket, _buckets);
    }
    _buckets = NULL;
  }
}

// Collect an unlinked entry into the context's removal list (head-linked;
// _removed_tail tracks the first entry added so the whole list can later be
// spliced onto the table's free list in one CAS).
template <MEMFLAGS F> void BasicHashtable<F>::BucketUnlinkContext::free_entry(BasicHashtableEntry<F>* entry) {
  entry->set_next(_removed_head);
  _removed_head = entry;
  if (_removed_tail == NULL) {
    _removed_tail = entry;
  }
  _num_removed++;
}

// Splice the context's removal list onto the table's free list and adjust
// the entry count.  Safe against concurrent callers via CAS on _free_list.
template <MEMFLAGS F> void BasicHashtable<F>::bulk_free_entries(BucketUnlinkContext* context) {
  if (context->_num_removed == 0) {
    assert(context->_removed_head == NULL && context->_removed_tail == NULL,
           "Zero entries in the unlink context, but elements linked from " PTR_FORMAT " to " PTR_FORMAT,
           p2i(context->_removed_head), p2i(context->_removed_tail));
    return;
  }

  // MT-safe add of the list of BasicHashTableEntrys from the context to the free list.
  BasicHashtableEntry<F>* current = _free_list;
  while (true) {
    // Point the tail of the removal list at the current free list head, then
    // try to publish the removal list's head; retry with the updated head if
    // another thread won the race.
    context->_removed_tail->set_next(current);
    BasicHashtableEntry<F>* old = Atomic::cmpxchg(context->_removed_head, &_free_list, current);
    if (old == current) {
      break;
    }
    current = old;
  }
  Atomic::add(-context->_num_removed, &_number_of_entries);
}
// Copy the table to the shared space.
// Returns the number of bytes copy_table() will need: a length word plus
// entry_size() bytes per live entry.
template <MEMFLAGS F> size_t BasicHashtable<F>::count_bytes_for_table() {
  size_t bytes = 0;
  bytes += sizeof(intptr_t); // len

  for (int i = 0; i < _table_size; ++i) {
    for (BasicHashtableEntry<F>** p = _buckets[i].entry_addr();
         *p != NULL;
         p = (*p)->next_addr()) {
      bytes += entry_size();
    }
  }

  return bytes;
}

// Dump the hash table entries (into CDS archive).  Each entry is memcpy'd to
// [top, end) and the in-table link is redirected to the archived copy.
template <MEMFLAGS F> void BasicHashtable<F>::copy_table(char* top, char* end) {
  assert(is_aligned(top, sizeof(intptr_t)), "bad alignment");
  intptr_t *plen = (intptr_t*)(top);
  top += sizeof(*plen);

  int i;
  for (i = 0; i < _table_size; ++i) {
    for (BasicHashtableEntry<F>** p = _buckets[i].entry_addr();
         *p != NULL;
         p = (*p)->next_addr()) {
      *p = (BasicHashtableEntry<F>*)memcpy(top, (void*)*p, entry_size());
      top += entry_size();
    }
  }
  // Record the payload length (excluding the length word itself).
  *plen = (char*)(top) - (char*)plen - sizeof(*plen);
  assert(top == end, "count_bytes_for_table is wrong");
  // Set the shared bit.

  for (i = 0; i < _table_size; ++i) {
    for (BasicHashtableEntry<F>* p = bucket(i); p != NULL; p = p->next()) {
      p->set_shared();
    }
  }
}

// For oops and Strings the size of the literal is interesting.  For other types, nobody cares.
static int literal_size(ConstantPool*) { return 0; }
static int literal_size(Klass*) { return 0; }
#if INCLUDE_ALL_GCS
static int literal_size(nmethod*) { return 0; }
#endif

static int literal_size(Symbol *symbol) {
  return symbol->size() * HeapWordSize;
}

static int literal_size(oop obj) {
  // NOTE: this would over-count if (pre-JDK8) java_lang_Class::has_offset_field() is true,
  // and the String.value array is shared by several Strings.  However, starting from JDK8,
  // the String.value array is not shared anymore.
  if (obj == NULL) {
    return 0;
  } else if (obj->klass() == SystemDictionary::String_klass()) {
    return (obj->size() + java_lang_String::value(obj)->size()) * HeapWordSize;
  } else {
    return obj->size();
  }
}

static int literal_size(ClassLoaderWeakHandle v) {
  return literal_size(v.peek());
}

// Resize the table to new_size buckets, relinking every entry into its new
// bucket.  Returns false if the new bucket array cannot be allocated.
// Must run at a safepoint: entries are relinked without synchronization.
template <MEMFLAGS F> bool BasicHashtable<F>::resize(int new_size) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  // Allocate new buckets
  HashtableBucket<F>* buckets_new = NEW_C_HEAP_ARRAY2_RETURN_NULL(HashtableBucket<F>, new_size, F, CURRENT_PC);
  if (buckets_new == NULL) {
    return false;
  }

  // Clear the new buckets
  for (int i = 0; i < new_size; i++) {
    buckets_new[i].clear();
  }

  int table_size_old = _table_size;
  // hash_to_index() uses _table_size, so switch the sizes now
  _table_size = new_size;

  // Move entries from the old table to a new table
  for (int index_old = 0; index_old < table_size_old; index_old++) {
    for (BasicHashtableEntry<F>* p = _buckets[index_old].get_entry(); p != NULL; ) {
      BasicHashtableEntry<F>* next = p->next();
      bool keep_shared = p->is_shared();
      int index_new = hash_to_index(p->hash());

      p->set_next(buckets_new[index_new].get_entry());
      buckets_new[index_new].set_entry(p);

      // set_next overwrote the shared bit (kept in the LSB of _next);
      // restore it so shared (CDS) entries stay marked undeletable.
      if (keep_shared) {
        p->set_shared();
      }
      p = next;
    }
  }

  // The old buckets can now be released
  BasicHashtable<F>::free_buckets();

  // Switch to the new storage
  _buckets = buckets_new;

  return true;
}

// Dump footprint and bucket length statistics
//
// Note: if you create a new subclass of Hashtable<MyNewType, F>, you will need to
// add a new function static int literal_size(MyNewType lit)
// because I can't get template <class T> int literal_size(T) to pick the specializations for Symbol and oop.
//
// The StringTable and SymbolTable dumping print how much footprint is used by the String and Symbol
// literals.

template <class T, MEMFLAGS F> void Hashtable<T, F>::print_table_statistics(outputStream* st,
                                                                            const char *table_name) {
  NumberSeq summary;
  // NOTE(review): literal_bytes (and total_bytes) are ints and could overflow
  // for very large tables -- presumably acceptable for a diagnostic dump; confirm.
  int literal_bytes = 0;
  for (int i = 0; i < this->table_size(); ++i) {
    int count = 0;
    for (HashtableEntry<T, F>* e = this->bucket(i);
         e != NULL; e = e->next()) {
      count++;
      literal_bytes += literal_size(e->literal());
    }
    summary.add((double)count);
  }
  double num_buckets = summary.num();
  double num_entries = summary.sum();

  int bucket_bytes = (int)num_buckets * sizeof(HashtableBucket<F>);
  int entry_bytes = (int)num_entries * sizeof(HashtableEntry<T, F>);
  int total_bytes = literal_bytes + bucket_bytes + entry_bytes;

  int bucket_size = (num_buckets <= 0) ? 0 : (bucket_bytes / num_buckets);
  int entry_size = (num_entries <= 0) ? 0 : (entry_bytes / num_entries);

  st->print_cr("%s statistics:", table_name);
  st->print_cr("Number of buckets : %9d = %9d bytes, each %d", (int)num_buckets, bucket_bytes, bucket_size);
  st->print_cr("Number of entries : %9d = %9d bytes, each %d", (int)num_entries, entry_bytes, entry_size);
  if (literal_bytes != 0) {
    double literal_avg = (num_entries <= 0) ? 0 : (literal_bytes / num_entries);
    st->print_cr("Number of literals : %9d = %9d bytes, avg %7.3f", (int)num_entries, literal_bytes, literal_avg);
  }
  st->print_cr("Total footprint : %9s = %9d bytes", "", total_bytes);
  st->print_cr("Average bucket size : %9.3f", summary.avg());
  st->print_cr("Variance of bucket size : %9.3f", summary.variance());
  st->print_cr("Std. dev. of bucket size: %9.3f", summary.sd());
  st->print_cr("Maximum bucket size : %9d", (int)summary.maximum());
}


// Dump the hash table buckets.

// Returns the number of bytes copy_buckets() will write: a length word, the
// entry count, and the bucket array itself.
template <MEMFLAGS F> size_t BasicHashtable<F>::count_bytes_for_buckets() {
  size_t bytes = 0;
  bytes += sizeof(intptr_t); // len
  bytes += sizeof(intptr_t); // _number_of_entries
  bytes += _table_size * sizeof(HashtableBucket<F>); // the buckets

  return bytes;
}

// Dump the buckets (into CDS archive)
template <MEMFLAGS F> void BasicHashtable<F>::copy_buckets(char* top, char* end) {
  assert(is_aligned(top, sizeof(intptr_t)), "bad alignment");
  intptr_t len = _table_size * sizeof(HashtableBucket<F>);
  *(intptr_t*)(top) = len;
  top += sizeof(intptr_t);

  *(intptr_t*)(top) = _number_of_entries;
  top += sizeof(intptr_t);

  // Relocate the bucket array into the archive space and point _buckets at
  // the archived copy.
  _buckets = (HashtableBucket<F>*)memcpy(top, (void*)_buckets, len);
  top += len;

  assert(top == end, "count_bytes_for_buckets is wrong");
}

#ifndef PRODUCT
// Generic literal printer: assumes T is a pointer-like type with print().
template <class T> void print_literal(T l) {
  l->print();
}

// ClassLoaderWeakHandle is a value type, so it gets its own overload.
static void print_literal(ClassLoaderWeakHandle l) {
  l.print();
}

// Debug dump of every bucket's entries to tty.
template <class T, MEMFLAGS F> void Hashtable<T, F>::print() {
  ResourceMark rm;

  for (int i = 0; i < BasicHashtable<F>::table_size(); i++) {
    HashtableEntry<T, F>* entry = bucket(i);
    while(entry != NULL) {
      tty->print("%d : ", i);
      print_literal(entry->literal());
      tty->cr();
      entry = entry->next();
    }
  }
}

// Verify every entry (T must provide verify() and next()), check that the
// per-bucket counts sum to _number_of_entries, and log bucket statistics.
template <MEMFLAGS F>
template <class T> void BasicHashtable<F>::verify_table(const char* table_name) {
  int element_count = 0;
  int max_bucket_count = 0;
  int max_bucket_number = 0;
  for (int index = 0; index < table_size(); index++) {
    int bucket_count = 0;
    for (T* probe = (T*)bucket(index); probe != NULL; probe = probe->next()) {
      probe->verify();
      bucket_count++;
    }
    element_count += bucket_count;
    if (bucket_count > max_bucket_count) {
      max_bucket_count = bucket_count;
      max_bucket_number = index;
    }
  }
  guarantee(number_of_entries() == element_count,
            "Verify of %s failed", table_name);

  // Log some statistics about the hashtable
  log_info(hashtables)("%s max bucket size %d bucket %d element count %d table size %d", table_name,
                       max_bucket_count, max_bucket_number, _number_of_entries, _table_size);
  if (_number_of_entries > 0 && log_is_enabled(Debug, hashtables)) {
    for (int index = 0; index < table_size(); index++) {
      int bucket_count = 0;
      for (T* probe = (T*)bucket(index); probe != NULL; probe = probe->next()) {
        log_debug(hashtables)("bucket %d hash " INTPTR_FORMAT, index, (intptr_t)probe->hash());
        bucket_count++;
      }
      if (bucket_count > 0) {
        log_debug(hashtables)("bucket %d count %d", index, bucket_count);
      }
    }
  }
}
#endif // PRODUCT

// Explicitly instantiate these types
#if INCLUDE_ALL_GCS
template class Hashtable<nmethod*, mtGC>;
template class HashtableEntry<nmethod*, mtGC>;
template class BasicHashtable<mtGC>;
#endif
template class Hashtable<ConstantPool*, mtClass>;
template class RehashableHashtable<Symbol*, mtSymbol>;
template class RehashableHashtable<oop, mtSymbol>;
template class Hashtable<Symbol*, mtSymbol>;
template class Hashtable<Klass*, mtClass>;
template class Hashtable<InstanceKlass*, mtClass>;
template class Hashtable<ClassLoaderWeakHandle, mtClass>;
template class Hashtable<Symbol*, mtModule>;
template class Hashtable<oop, mtSymbol>;
template class Hashtable<ClassLoaderWeakHandle, mtSymbol>;
template class Hashtable<Symbol*, mtClass>;
template class HashtableEntry<Symbol*, mtSymbol>;
template class HashtableEntry<Symbol*, mtClass>;
template class HashtableEntry<oop, mtSymbol>;
template class HashtableEntry<ClassLoaderWeakHandle, mtSymbol>;
template class HashtableBucket<mtClass>;
template class BasicHashtableEntry<mtSymbol>;
template class BasicHashtableEntry<mtCode>;
template class BasicHashtable<mtClass>;
template class BasicHashtable<mtClassShared>;
template class BasicHashtable<mtSymbol>;
template class BasicHashtable<mtCode>;
template class BasicHashtable<mtInternal>;
template class BasicHashtable<mtModule>;
#if INCLUDE_TRACE
template class Hashtable<Symbol*, mtTracing>;
template class HashtableEntry<Symbol*, mtTracing>;
template class BasicHashtable<mtTracing>;
#endif
template class BasicHashtable<mtCompiler>;

// Explicit instantiations of the (non-product) verify_table member template
// for each entry type that uses it.
template void BasicHashtable<mtClass>::verify_table<DictionaryEntry>(char const*);
template void BasicHashtable<mtModule>::verify_table<ModuleEntry>(char const*);
template void BasicHashtable<mtModule>::verify_table<PackageEntry>(char const*);
template void BasicHashtable<mtClass>::verify_table<ProtectionDomainCacheEntry>(char const*);
template void BasicHashtable<mtClass>::verify_table<PlaceholderEntry>(char const*);