< prev index next >

src/share/vm/gc_implementation/g1/g1StringDedupTable.cpp

Print this page




  75 // reuse of table entries to lower the pressure on the underlying allocator.
  76 // But more importantly, it provides fast/deferred freeing of table entries. This
  77 // is important because freeing of table entries is done during stop-the-world
  78 // phases and it is not uncommon for large number of entries to be freed at once.
  79 // Table entries that are freed during these phases are placed onto a freelist in
  80 // the cache. The deduplication thread, which executes in a concurrent phase, will
  81 // later reuse or free the underlying memory for these entries.
  82 //
  83 // The cache allows for single-threaded allocations and multi-threaded frees.
  84 // Allocations are synchronized by StringDedupTable_lock as part of a table
  85 // modification.
  86 //
  87 class G1StringDedupEntryCache : public CHeapObj<mtGC> {
  88 private:
  89   // One freelist per GC worker to allow lock less freeing of
  90   // entries while doing a parallel scan of the table. Using
  91   // PaddedEnd to avoid false sharing.
  92   PaddedEnd<G1StringDedupEntryFreeList>* _lists;
  93   size_t                                 _nlists;
  94 



  95 public:
  96   G1StringDedupEntryCache();
  97   ~G1StringDedupEntryCache();
  98 
  99   // Get a table entry from the cache freelist, or allocate a new
 100   // entry if the cache is empty.
 101   G1StringDedupEntry* alloc();
 102 
 103   // Insert a table entry into the cache freelist.
 104   void free(G1StringDedupEntry* entry, uint worker_id);
 105 
 106   // Returns current number of entries in the cache.
 107   size_t size();
 108 
 109   // If the cache has grown above the given max size, trim it down
 110   // and deallocate the memory occupied by trimmed of entries.
 111   void trim(size_t max_size);
 112 };
 113 
 114 G1StringDedupEntryCache::G1StringDedupEntryCache() {
 115   _nlists = MAX2(ParallelGCThreads, (size_t)1);
 116   _lists = PaddedArray<G1StringDedupEntryFreeList, mtGC>::create_unfreeable((uint)_nlists);
 117 }
 118 
 119 G1StringDedupEntryCache::~G1StringDedupEntryCache() {
 120   ShouldNotReachHere();
 121 }
 122 
 123 G1StringDedupEntry* G1StringDedupEntryCache::alloc() {
 124   for (size_t i = 0; i < _nlists; i++) {
 125     G1StringDedupEntry* entry = _lists[i].remove();
 126     if (entry != NULL) {
 127       return entry;
 128     }
 129   }
 130   return new G1StringDedupEntry();
 131 }
 132 
 133 void G1StringDedupEntryCache::free(G1StringDedupEntry* entry, uint worker_id) {
 134   assert(entry->obj() != NULL, "Double free");
 135   assert(worker_id < _nlists, "Invalid worker id");
 136   entry->set_obj(NULL);
 137   entry->set_hash(0);
 138   _lists[worker_id].add(entry);
 139 }
 140 




  75 // reuse of table entries to lower the pressure on the underlying allocator.
  76 // But more importantly, it provides fast/deferred freeing of table entries. This
  77 // is important because freeing of table entries is done during stop-the-world
  78 // phases and it is not uncommon for large number of entries to be freed at once.
  79 // Table entries that are freed during these phases are placed onto a freelist in
  80 // the cache. The deduplication thread, which executes in a concurrent phase, will
  81 // later reuse or free the underlying memory for these entries.
  82 //
  83 // The cache allows for single-threaded allocations and multi-threaded frees.
  84 // Allocations are synchronized by StringDedupTable_lock as part of a table
  85 // modification.
  86 //
  87 class G1StringDedupEntryCache : public CHeapObj<mtGC> {
  88 private:
  89   // One freelist per GC worker to allow lock less freeing of
  90   // entries while doing a parallel scan of the table. Using
  91   // PaddedEnd to avoid false sharing.
  92   PaddedEnd<G1StringDedupEntryFreeList>* _lists;
  93   size_t                                 _nlists;
  94 
  95   // Never called.
  96   ~G1StringDedupEntryCache();
  97 
  98 public:
  99   G1StringDedupEntryCache();

 100 
 101   // Get a table entry from the cache freelist, or allocate a new
 102   // entry if the cache is empty.
 103   G1StringDedupEntry* alloc();
 104 
 105   // Insert a table entry into the cache freelist.
 106   void free(G1StringDedupEntry* entry, uint worker_id);
 107 
 108   // Returns current number of entries in the cache.
 109   size_t size();
 110 
 111   // If the cache has grown above the given max size, trim it down
 112   // and deallocate the memory occupied by trimmed of entries.
 113   void trim(size_t max_size);
 114 };
 115 
 116 G1StringDedupEntryCache::G1StringDedupEntryCache() {
 117   _nlists = MAX2(ParallelGCThreads, (size_t)1);
 118   _lists = PaddedArray<G1StringDedupEntryFreeList, mtGC>::create_unfreeable((uint)_nlists);




 119 }
 120 
 121 G1StringDedupEntry* G1StringDedupEntryCache::alloc() {
 122   for (size_t i = 0; i < _nlists; i++) {
 123     G1StringDedupEntry* entry = _lists[i].remove();
 124     if (entry != NULL) {
 125       return entry;
 126     }
 127   }
 128   return new G1StringDedupEntry();
 129 }
 130 
 131 void G1StringDedupEntryCache::free(G1StringDedupEntry* entry, uint worker_id) {
 132   assert(entry->obj() != NULL, "Double free");
 133   assert(worker_id < _nlists, "Invalid worker id");
 134   entry->set_obj(NULL);
 135   entry->set_hash(0);
 136   _lists[worker_id].add(entry);
 137 }
 138 


< prev index next >