< prev index next >

src/share/vm/utilities/hashtable.cpp

Print this page




  67       _end_block = _first_free_entry + len;
  68     }
  69     entry = (BasicHashtableEntry<F>*)_first_free_entry;
  70     _first_free_entry += _entry_size;
  71   }
  72 
  73   assert(_entry_size % HeapWordSize == 0, "");
  74   entry->set_hash(hashValue);
  75   return entry;
  76 }
  77 
  78 
  79 template <class T, MEMFLAGS F> HashtableEntry<T, F>* Hashtable<T, F>::new_entry(unsigned int hashValue, T obj) {
  80   HashtableEntry<T, F>* entry;
  81 
  82   entry = (HashtableEntry<T, F>*)BasicHashtable<F>::new_entry(hashValue);
  83   entry->set_literal(obj);
  84   return entry;
  85 }
  86 











  87 // Check to see if the hashtable is unbalanced.  The caller set a flag to
  88 // rehash at the next safepoint.  If this bucket is 60 times greater than the
  89 // expected average bucket length, it's an unbalanced hashtable.
  90 // This is somewhat an arbitrary heuristic but if one bucket gets to
  91 // rehash_count which is currently 100, there's probably something wrong.
  92 
  93 template <class T, MEMFLAGS F> bool RehashableHashtable<T, F>::check_rehash_table(int count) {
  94   assert(this->table_size() != 0, "underflow");
  95   if (count > (((double)this->number_of_entries()/(double)this->table_size())*rehash_multiple)) {
  96     // Set a flag for the next safepoint, which should be at some guaranteed
  97     // safepoint interval.
  98     return true;
  99   }
 100   return false;
 101 }
 102 
// Create a new table and, using the alternate hash code, populate the new
// table with the existing elements.  This can be used to change the hash
// code, and could in the future also change the size of the table.
 106 


 340         log_debug(hashtables)("bucket %d count %d", index, bucket_count);
 341       }
 342     }
 343   }
 344 }
 345 #endif // PRODUCT
 346 
// Explicitly instantiate these types
// (keeps the template definitions in this .cpp file usable from other
// translation units that only see the declarations).
#if INCLUDE_ALL_GCS
// Instantiations guarded by INCLUDE_ALL_GCS; keyed with the mtGC memory flag.
template class Hashtable<nmethod*, mtGC>;
template class HashtableEntry<nmethod*, mtGC>;
template class BasicHashtable<mtGC>;
#endif
template class Hashtable<ConstantPool*, mtClass>;
template class RehashableHashtable<Symbol*, mtSymbol>;
template class RehashableHashtable<oopDesc*, mtSymbol>;
template class Hashtable<Symbol*, mtSymbol>;
template class Hashtable<Klass*, mtClass>;
template class Hashtable<InstanceKlass*, mtClass>;
template class Hashtable<oop, mtClass>;

#if defined(SOLARIS) || defined(CHECK_UNHANDLED_OOPS)
// oop-keyed variants needed only on Solaris or CHECK_UNHANDLED_OOPS builds.
template class Hashtable<oop, mtSymbol>;
template class RehashableHashtable<oop, mtSymbol>;
#endif // SOLARIS || CHECK_UNHANDLED_OOPS
template class Hashtable<oopDesc*, mtSymbol>;
template class Hashtable<Symbol*, mtClass>;
template class HashtableEntry<Symbol*, mtSymbol>;
template class HashtableEntry<Symbol*, mtClass>;
template class HashtableEntry<oop, mtSymbol>;
template class BasicHashtableEntry<mtSymbol>;
template class BasicHashtableEntry<mtCode>;
template class BasicHashtable<mtClass>;
template class BasicHashtable<mtClassShared>;
template class BasicHashtable<mtSymbol>;
template class BasicHashtable<mtCode>;
template class BasicHashtable<mtInternal>;
template class BasicHashtable<mtModule>;
#if INCLUDE_TRACE
// Instantiations for tracing support (guard closed past this view).
template class Hashtable<Symbol*, mtTracing>;
template class HashtableEntry<Symbol*, mtTracing>;


  67       _end_block = _first_free_entry + len;
  68     }
  69     entry = (BasicHashtableEntry<F>*)_first_free_entry;
  70     _first_free_entry += _entry_size;
  71   }
  72 
  73   assert(_entry_size % HeapWordSize == 0, "");
  74   entry->set_hash(hashValue);
  75   return entry;
  76 }
  77 
  78 
  79 template <class T, MEMFLAGS F> HashtableEntry<T, F>* Hashtable<T, F>::new_entry(unsigned int hashValue, T obj) {
  80   HashtableEntry<T, F>* entry;
  81 
  82   entry = (HashtableEntry<T, F>*)BasicHashtable<F>::new_entry(hashValue);
  83   entry->set_literal(obj);
  84   return entry;
  85 }
  86 
  87 // Version of hashtable entry allocation that allocates in the C heap directly.
  88 // The allocator in blocks is preferable but doesn't have free semantics.
  89 template <class T, MEMFLAGS F> HashtableEntry<T, F>* Hashtable<T, F>::allocate_new_entry(unsigned int hashValue, T obj) {
  90   HashtableEntry<T, F>* entry = (HashtableEntry<T, F>*) NEW_C_HEAP_ARRAY(char, this->entry_size(), F);
  91 
  92   entry->set_hash(hashValue);
  93   entry->set_literal(obj);
  94   entry->set_next(NULL);
  95   return entry;
  96 }
  97 
  98 // Check to see if the hashtable is unbalanced.  The caller set a flag to
  99 // rehash at the next safepoint.  If this bucket is 60 times greater than the
 100 // expected average bucket length, it's an unbalanced hashtable.
 101 // This is somewhat an arbitrary heuristic but if one bucket gets to
 102 // rehash_count which is currently 100, there's probably something wrong.
 103 
 104 template <class T, MEMFLAGS F> bool RehashableHashtable<T, F>::check_rehash_table(int count) {
 105   assert(this->table_size() != 0, "underflow");
 106   if (count > (((double)this->number_of_entries()/(double)this->table_size())*rehash_multiple)) {
 107     // Set a flag for the next safepoint, which should be at some guaranteed
 108     // safepoint interval.
 109     return true;
 110   }
 111   return false;
 112 }
 113 
// Create a new table and, using the alternate hash code, populate the new
// table with the existing elements.  This can be used to change the hash
// code, and could in the future also change the size of the table.
 117 


 351         log_debug(hashtables)("bucket %d count %d", index, bucket_count);
 352       }
 353     }
 354   }
 355 }
 356 #endif // PRODUCT
 357 
// Explicitly instantiate these types
// (keeps the template definitions in this .cpp file usable from other
// translation units that only see the declarations).
#if INCLUDE_ALL_GCS
// Instantiations guarded by INCLUDE_ALL_GCS; keyed with the mtGC memory flag.
template class Hashtable<nmethod*, mtGC>;
template class HashtableEntry<nmethod*, mtGC>;
template class BasicHashtable<mtGC>;
#endif
template class Hashtable<ConstantPool*, mtClass>;
template class RehashableHashtable<Symbol*, mtSymbol>;
template class RehashableHashtable<oopDesc*, mtSymbol>;
template class Hashtable<Symbol*, mtSymbol>;
template class Hashtable<Klass*, mtClass>;
template class Hashtable<InstanceKlass*, mtClass>;
template class Hashtable<oop, mtClass>;
template class Hashtable<Symbol*, mtModule>;
#if defined(SOLARIS) || defined(CHECK_UNHANDLED_OOPS)
// oop-keyed variants needed only on Solaris or CHECK_UNHANDLED_OOPS builds.
template class Hashtable<oop, mtSymbol>;
template class RehashableHashtable<oop, mtSymbol>;
#endif // SOLARIS || CHECK_UNHANDLED_OOPS
template class Hashtable<oopDesc*, mtSymbol>;
template class Hashtable<Symbol*, mtClass>;
template class HashtableEntry<Symbol*, mtSymbol>;
template class HashtableEntry<Symbol*, mtClass>;
template class HashtableEntry<oop, mtSymbol>;
template class BasicHashtableEntry<mtSymbol>;
template class BasicHashtableEntry<mtCode>;
template class BasicHashtable<mtClass>;
template class BasicHashtable<mtClassShared>;
template class BasicHashtable<mtSymbol>;
template class BasicHashtable<mtCode>;
template class BasicHashtable<mtInternal>;
template class BasicHashtable<mtModule>;
#if INCLUDE_TRACE
// Instantiations for tracing support (guard closed past this view).
template class Hashtable<Symbol*, mtTracing>;
template class HashtableEntry<Symbol*, mtTracing>;
< prev index next >