src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

@@ -277,14 +277,14 @@
     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
     _bt.single_block(mr.start(), mr.word_size());
     FreeChunk* fc = (FreeChunk*) mr.start();
     fc->set_size(mr.word_size());
     if (mr.word_size() >= IndexSetSize ) {
-      returnChunkToDictionary(fc);
+      returnChunkToDictionary(fc, true);
     } else {
       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
-      _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
+      _indexedFreeList[mr.word_size()].return_chunk_at_head(fc, true, true);
     }
   }
   _promoInfo.reset();
   _smallLinearAllocBlock._ptr = NULL;
   _smallLinearAllocBlock._word_size = 0;
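
Note: the three-argument return_chunk_at_head()/return_chunk_at_tail() forms introduced here (and used again in later hunks) are not declared in this file, and the freelist-side diff is not part of this page. Judging only from these call sites and the pre-existing one- and two-argument forms visible in the removed lines, the list-side declarations are assumed to gain a trailing deallocate_pages flag, roughly:

    // freeList.hpp / adaptiveFreeList.hpp (assumed shape, not taken from this webrev)
    void return_chunk_at_head(Chunk_t* fc, bool record_return, bool deallocate_pages);
    void return_chunk_at_tail(Chunk_t* fc, bool record_return, bool deallocate_pages);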

@@ -1476,11 +1476,11 @@
     // for all chunks added back to the indexed free lists.
     if (sz < SmallForDictionary) {
       _bt.allocated(blk->_ptr, sz);
     }
     // Return the chunk that isn't big enough, and then refill below.
-    addChunkToFreeLists(blk->_ptr, sz);
+    addChunkToFreeLists(blk->_ptr, sz, true);
     split_birth(sz);
     // Don't keep statistics on adding back chunk from a LinAB.
   } else {
     // A refilled block would not satisfy the request.
     return NULL;

@@ -1613,11 +1613,11 @@
                i++) {
             curFc->set_size(size);
             // Don't record this as a return in order to try and
             // determine the "returns" from a GC.
             _bt.verify_not_unallocated((HeapWord*) fc, size);
-            _indexedFreeList[size].return_chunk_at_tail(curFc, false);
+            _indexedFreeList[size].return_chunk_at_tail(curFc, false, true);
             _bt.mark_block((HeapWord*)curFc, size);
             split_birth(size);
             // Don't record the initial population of the indexed list
             // as a split birth.
           }

@@ -1674,11 +1674,11 @@
     return fc;
   }
   assert(fc->size() > size, "get_chunk() guarantee");
   if (fc->size() < size + MinChunkSize) {
     // Return the chunk to the dictionary and go get a bigger one.
-    returnChunkToDictionary(fc);
+    returnChunkToDictionary(fc, true);
     fc = _dictionary->get_chunk(size + MinChunkSize);
     if (fc == NULL) {
       return NULL;
     }
     _bt.allocated((HeapWord*)fc, fc->size());

@@ -1689,37 +1689,37 @@
   _bt.verify_single_block((HeapWord*)fc, size);
   return fc;
 }
 
 void
-CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
+CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk, bool deallocate_pages) {
   assert_locked();
 
   size_t size = chunk->size();
   _bt.verify_single_block((HeapWord*)chunk, size);
   // adjust _unallocated_block downward, as necessary
   _bt.freed((HeapWord*)chunk, size);
-  _dictionary->return_chunk(chunk);
+  _dictionary->return_chunk(chunk, deallocate_pages);
 #ifndef PRODUCT
   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
     TreeChunk<FreeChunk, AdaptiveFreeList>* tc = TreeChunk<FreeChunk, AdaptiveFreeList>::as_TreeChunk(chunk);
     TreeList<FreeChunk, AdaptiveFreeList>* tl = tc->list();
     tl->verify_stats();
   }
 #endif // PRODUCT
 }
 
 void
-CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
+CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc, bool deallocate_pages) {
   assert_locked();
   size_t size = fc->size();
   _bt.verify_single_block((HeapWord*) fc, size);
   _bt.verify_not_unallocated((HeapWord*) fc, size);
   if (_adaptive_freelists) {
-    _indexedFreeList[size].return_chunk_at_tail(fc);
+    _indexedFreeList[size].return_chunk_at_tail(fc, true, deallocate_pages);
   } else {
-    _indexedFreeList[size].return_chunk_at_head(fc);
+    _indexedFreeList[size].return_chunk_at_head(fc, true, deallocate_pages);
   }
 #ifndef PRODUCT
   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
      _indexedFreeList[size].verify_stats();
   }
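
Note: returnChunkToDictionary() and returnChunkToFreeList() only forward the new flag, to the dictionary via _dictionary->return_chunk() and to the indexed free lists via the return_chunk_at_head()/_at_tail() calls above. The dictionary-side declaration is not shown on this page; based on this call site and the stock single-argument return_chunk(), it is assumed to look roughly like:

    // binaryTreeDictionary.hpp (assumed shape, not taken from this webrev)
    void return_chunk(Chunk_t* chunk, bool deallocate_pages);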

@@ -1760,46 +1760,47 @@
   debug_only(ec->mangleFreed(size));
   if (size < SmallForDictionary) {
     lock = _indexedFreeListParLocks[size];
   }
   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
-  addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
+  addChunkAndRepairOffsetTable((HeapWord*)ec, size, true, true);
   // record the birth under the lock since the recording involves
   // manipulation of the list on which the chunk lives and
   // if the chunk is allocated and is the last on the list,
   // the list can go away.
   coalBirth(size);
 }
 
 void
 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
-                                              size_t     size) {
+                                              size_t     size,
+                                              bool deallocate_pages) {
   // check that the chunk does lie in this space!
   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
   assert_locked();
   _bt.verify_single_block(chunk, size);
 
   FreeChunk* fc = (FreeChunk*) chunk;
   fc->set_size(size);
   debug_only(fc->mangleFreed(size));
   if (size < SmallForDictionary) {
-    returnChunkToFreeList(fc);
+    returnChunkToFreeList(fc, deallocate_pages);
   } else {
-    returnChunkToDictionary(fc);
+    returnChunkToDictionary(fc, deallocate_pages);
   }
 }
 
 void
 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
-  size_t size, bool coalesced) {
+  size_t size, bool coalesced, bool deallocate_pages) {
   assert_locked();
   assert(chunk != NULL, "null chunk");
   if (coalesced) {
     // repair BOT
     _bt.single_block(chunk, size);
   }
-  addChunkToFreeLists(chunk, size);
+  addChunkToFreeLists(chunk, size, deallocate_pages);
 }
 
 // We _must_ find the purported chunk on our free lists;
 // we assert if we don't.
 void

@@ -1919,15 +1920,23 @@
     bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
     if (is_par) _indexedFreeListParLocks[rem_size]->lock();
     assert(!is_par ||
            (SharedHeap::heap()->n_par_threads() ==
             SharedHeap::heap()->workers()->active_workers()), "Mismatch");
-    returnChunkToFreeList(ffc);
+    // No need to deallocate pages here: the chunk being split is already
+    // free, so its pages have already been deallocated at this point.
+    // Deallocating them again would waste a system call, notably at every
+    // old gen (OG) allocation after a compacting GC.
+    returnChunkToFreeList(ffc, false);
     split(size, rem_size);
     if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
   } else {
-    returnChunkToDictionary(ffc);
+    // No need to deallocate pages here: the chunk being split is already
+    // free, so its pages have already been deallocated at this point.
+    // Deallocating them again would waste a system call, notably at every
+    // old gen (OG) allocation after a compacting GC.
+    returnChunkToDictionary(ffc, false);
     split(size, rem_size);
   }
   chunk->set_size(new_size);
   return chunk;
 }
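
Note: where deallocate_pages finally takes effect is not visible in this file. A minimal sketch of the assumed list-side behavior, with the existing bookkeeping elided, is:

    // Sketch only; the real freeList.cpp/adaptiveFreeList.cpp changes are not on this page.
    template <class Chunk_t>
    void FreeList<Chunk_t>::return_chunk_at_head(Chunk_t* chunk,
                                                 bool record_return,
                                                 bool deallocate_pages) {
      link_head(chunk);  // ... existing linking and return-statistics code elided ...
      if (deallocate_pages) {
        // Hand the chunk's backing pages to the OS. Callers pass false when the
        // pages are known to be deallocated already (e.g. the split remainders
        // above), avoiding a redundant system call.
        os::deallocate_pages((char*)chunk, chunk->size() * HeapWordSize);
      }
    }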

@@ -2807,11 +2816,21 @@
               assert(fc_size == i*word_sz, "Error");
               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
               _bt.verify_single_block((HeapWord*)fc, fc_size);
               _bt.verify_single_block((HeapWord*)ffc, word_sz);
               // Push this on "fl".
-              fl->return_chunk_at_head(ffc);
+              // Note: deallocate_pages=false (3rd arg). There is no need
+              // to deallocate pages for this chunk when returning it to
+              // the free lists, because the original free chunk it was
+              // carved from came off the free lists and its pages are
+              // already deallocated. Since this function is called
+              // frequently, once per object promotion, avoiding the
+              // unnecessary system call (os::deallocate_pages()) helps
+              // performance. The same applies to the other
+              // returnChunk... calls in this function.
+              fl->return_chunk_at_head(ffc, true, false);
             }
             // TRAP
             assert(fl->tail()->next() == NULL, "List invariant.");
           }
         }
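
Note: the comment above refers to os::deallocate_pages(); the os-level changes are not part of this file's diff. As an illustration of why these calls are worth skipping on hot paths, a hypothetical Linux-style primitive of this kind costs one madvise() system call per invocation:

    #include <stddef.h>
    #include <sys/mman.h>

    // Hypothetical sketch, not the patch's actual os::deallocate_pages(): the
    // mapping stays valid, but the physical pages are released until the range
    // is touched again. addr is assumed to be page aligned.
    static bool deallocate_pages_sketch(char* addr, size_t bytes) {
      return ::madvise(addr, bytes, MADV_DONTNEED) == 0;
    }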

@@ -2864,11 +2883,11 @@
     // If n is 0, the chunk fc that was found is not large
     // enough to leave a viable remainder.  We are unable to
     // allocate even one block.  Return fc to the
     // dictionary and return, leaving "fl" empty.
     if (n == 0) {
-      returnChunkToDictionary(fc);
+      returnChunkToDictionary(fc, false);
       assert(fl->count() == 0, "We never allocated any blocks");
       return;
     }
 
     // First return the remainder, if any.

@@ -2887,22 +2906,22 @@
       OrderAccess::storestore();
       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
       assert(fc->is_free(), "Error");
       fc->set_size(prefix_size);
       if (rem >= IndexSetSize) {
-        returnChunkToDictionary(rem_fc);
+        returnChunkToDictionary(rem_fc, false);
         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
         rem_fc = NULL;
       }
       // Otherwise, return it to the small list below.
     }
   }
   if (rem_fc != NULL) {
     MutexLockerEx x(_indexedFreeListParLocks[rem],
                     Mutex::_no_safepoint_check_flag);
     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
-    _indexedFreeList[rem].return_chunk_at_head(rem_fc);
+    _indexedFreeList[rem].return_chunk_at_head(rem_fc, true, false);
     smallSplitBirth(rem);
   }
   assert((ssize_t)n > 0 && fc != NULL, "Consistency");
   // Now do the splitting up.
   // Must do this in reverse order, so that anybody attempting to

@@ -2922,21 +2941,21 @@
     fc_size -= word_sz;
     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
     _bt.verify_single_block((HeapWord*)fc, fc_size);
     // Push this on "fl".
-    fl->return_chunk_at_head(ffc);
+    fl->return_chunk_at_head(ffc, true, false);
   }
   // First chunk
   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
   // The blocks above should show their new sizes before the first block below
   fc->set_size(word_sz);
   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
   fc->link_next(NULL);
   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
   _bt.verify_single_block((HeapWord*)fc, fc->size());
-  fl->return_chunk_at_head(fc);
+  fl->return_chunk_at_head(fc, true, false);
 
   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
   {
     // Update the stats for this block size.
     MutexLockerEx x(_indexedFreeListParLocks[word_sz],