
src/hotspot/share/services/mallocSiteTable.hpp

rev 50951 : imported patch static_initialization.patch


 230 
 231   // Walk this table.
 232   static bool walk_malloc_site(MallocSiteWalker* walker);
 233 
 234  private:
 235   static MallocSiteHashtableEntry* new_entry(const NativeCallStack& key, MEMFLAGS flags);
 236   static void reset();
 237 
 238   // Delete a bucket linked list
 239   static void delete_linked_list(MallocSiteHashtableEntry* head);
 240 
 241   static MallocSite* lookup_or_add(const NativeCallStack& key, size_t* bucket_idx, size_t* pos_idx, MEMFLAGS flags);
 242   static MallocSite* malloc_site(size_t bucket_idx, size_t pos_idx);
 243   static bool walk(MallocSiteWalker* walker);
 244 
 245   static inline unsigned int hash_to_index(unsigned int hash) {
 246     return (hash % table_size);
 247   }
 248 
 249   static inline const NativeCallStack* hash_entry_allocation_stack() {
 250     return (NativeCallStack*)_hash_entry_allocation_stack;
 251   }
 252 
 253  private:
 254   // Counter for counting concurrent access
 255   static volatile int                _access_count;
 256 
 257   // The callsite hashtable. It has to be a static table,
 258   // since malloc calls can come from the C runtime linker.
 259   static MallocSiteHashtableEntry*   _table[table_size];
 260 
 261 
 262   // Reserve enough memory for placing the objects
 263 
 264   // The memory for hashtable entry allocation stack object
 265   static size_t _hash_entry_allocation_stack[CALC_OBJ_SIZE_IN_TYPE(NativeCallStack, size_t)];
 266   // The memory for hashtable entry allocation callsite object
 267   static size_t _hash_entry_allocation_site[CALC_OBJ_SIZE_IN_TYPE(MallocSiteHashtableEntry, size_t)];
 268   NOT_PRODUCT(static int     _peak_count;)
 269 };
 270 
 271 #endif // INCLUDE_NMT
 272 #endif // SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP
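
The listing above is the pre-patch version of the header: the NativeCallStack used for hashtable-entry allocation lives in raw, pre-reserved size_t storage, and hash_entry_allocation_stack() simply casts that storage. Below is a minimal, self-contained sketch of this pre-reserved-storage idiom. The class names and the size macro are hypothetical stand-ins (the real CALC_OBJ_SIZE_IN_TYPE comes from HotSpot's shared headers), so treat it as an illustration of the pattern, not the actual MallocSiteTable code.

#include <cstddef>
#include <new>

// Rough stand-in for CALC_OBJ_SIZE_IN_TYPE: number of 'type' slots needed
// to hold an object of type 'obj', rounded up.
#define OBJ_SIZE_IN_TYPE(obj, type) ((sizeof(obj) + sizeof(type) - 1) / sizeof(type))

struct CallStack {                  // stand-in for NativeCallStack
  int _depth;
  CallStack() : _depth(0) {}
};

class SiteTable {                   // stand-in for MallocSiteTable
 public:
  static bool initialize() {
    // Construct the object inside the pre-reserved static buffer with
    // placement new; no heap allocation is needed during early startup.
    ::new ((void*)_stack_storage) CallStack();
    return true;
  }
  static const CallStack* allocation_stack() {
    // Pre-patch style: the accessor casts the raw storage directly.
    return (const CallStack*)_stack_storage;
  }
 private:
  // Raw storage sized in size_t units, mirroring _hash_entry_allocation_stack.
  static size_t _stack_storage[OBJ_SIZE_IN_TYPE(CallStack, size_t)];
};

size_t SiteTable::_stack_storage[OBJ_SIZE_IN_TYPE(CallStack, size_t)];

Static, pre-reserved storage keeps the table self-contained during early startup, when, as the comment in the header notes, malloc calls can already arrive from the C runtime linker.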


 230 
 231   // Walk this table.
 232   static bool walk_malloc_site(MallocSiteWalker* walker);
 233 
 234  private:
 235   static MallocSiteHashtableEntry* new_entry(const NativeCallStack& key, MEMFLAGS flags);
 236   static void reset();
 237 
 238   // Delete a bucket linked list
 239   static void delete_linked_list(MallocSiteHashtableEntry* head);
 240 
 241   static MallocSite* lookup_or_add(const NativeCallStack& key, size_t* bucket_idx, size_t* pos_idx, MEMFLAGS flags);
 242   static MallocSite* malloc_site(size_t bucket_idx, size_t pos_idx);
 243   static bool walk(MallocSiteWalker* walker);
 244 
 245   static inline unsigned int hash_to_index(unsigned int hash) {
 246     return (hash % table_size);
 247   }
 248 
 249   static inline const NativeCallStack* hash_entry_allocation_stack() {
 250     assert(_hash_entry_allocation_stack != NULL, "Must be set");
 251     return _hash_entry_allocation_stack;
 252   }
 253 
 254   static inline const MallocSiteHashtableEntry* hash_entry_allocation_site() {
 255     assert(_hash_entry_allocation_site != NULL, "Must be set");
 256     return _hash_entry_allocation_site;
 257   }
 258 
 259  private:
 260   // Counter for counting concurrent access
 261   static volatile int                _access_count;
 262 
 263   // The callsite hashtable. It has to be a static table,
 264   // since malloc calls can come from the C runtime linker.
 265   static MallocSiteHashtableEntry*        _table[table_size];
 266   static const NativeCallStack*           _hash_entry_allocation_stack;
 267   static const MallocSiteHashtableEntry*  _hash_entry_allocation_site;
 268 
 269 
 270   NOT_PRODUCT(static int     _peak_count;)
 271 };
 272 
 273 #endif // INCLUDE_NMT
 274 #endif // SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP
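
The patched listing above drops the raw cast: the hashtable-entry allocation stack and allocation site are now held as typed const pointers (_hash_entry_allocation_stack, _hash_entry_allocation_site) that are set during initialization, and both accessors assert that the pointers have been set. A minimal sketch of that accessor pattern, again using hypothetical stand-in names rather than the real HotSpot types:

#include <cassert>
#include <cstddef>

struct CallStack {                  // stand-in for NativeCallStack
  int _depth;
  CallStack() : _depth(0) {}
};

class SiteTable {                   // stand-in for MallocSiteTable
 public:
  static bool initialize() {
    // The real code would still construct the object in pre-reserved storage;
    // a function-local static stands in for that storage here.
    static CallStack stack_instance;
    _allocation_stack = &stack_instance;
    return _allocation_stack != NULL;
  }
  static const CallStack* allocation_stack() {
    // Patched style: a typed pointer plus an assert that initialization has
    // already run, instead of casting raw storage.
    assert(_allocation_stack != NULL && "Must be set");
    return _allocation_stack;
  }
 private:
  static const CallStack* _allocation_stack;
};

const CallStack* SiteTable::_allocation_stack = NULL;

The asserted accessor makes a use-before-initialization mistake fail fast in debug builds, whereas the old cast would quietly hand out a pointer to storage whose object might not have been constructed yet.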