src/hotspot/share/gc/g1/g1AllocRegion.cpp

Old file (before change):

   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/g1/g1AllocRegion.inline.hpp"
  27 #include "gc/g1/g1EvacStats.inline.hpp"
  28 #include "gc/g1/g1CollectedHeap.inline.hpp"

  29 #include "logging/log.hpp"
  30 #include "logging/logStream.hpp"
  31 #include "memory/resourceArea.hpp"
  32 #include "runtime/orderAccess.hpp"
  33 #include "utilities/align.hpp"
  34 
  35 G1CollectedHeap* G1AllocRegion::_g1h = NULL;
  36 HeapRegion* G1AllocRegion::_dummy_region = NULL;
  37 
  38 void G1AllocRegion::setup(G1CollectedHeap* g1h, HeapRegion* dummy_region) {
  39   assert(_dummy_region == NULL, "should be set once");
  40   assert(dummy_region != NULL, "pre-condition");
  41   assert(dummy_region->free() == 0, "pre-condition");
  42 
  43   // Make sure that any allocation attempt on this region will fail
  44   // and will not trigger any asserts.
  45   assert(dummy_region->allocate_no_bot_updates(1) == NULL, "should fail");
  46   assert(dummy_region->allocate(1) == NULL, "should fail");
  47   DEBUG_ONLY(size_t assert_tmp);
  48   assert(dummy_region->par_allocate_no_bot_updates(1, 1, &assert_tmp) == NULL, "should fail");


  54 
  55 size_t G1AllocRegion::fill_up_remaining_space(HeapRegion* alloc_region) {
  56   assert(alloc_region != NULL && alloc_region != _dummy_region,
  57          "pre-condition");
  58   size_t result = 0;
  59 
  60   // Other threads might still be trying to allocate using a CAS out
  61   // of the region we are trying to retire, as they can do so without
  62  // holding the lock. So, we first have to make sure that no one else
  63   // can allocate out of it by doing a maximal allocation. Even if our
  64   // CAS attempt fails a few times, we'll succeed sooner or later
  65   // given that failed CAS attempts mean that the region is getting
  66  // close to being full.
  67   size_t free_word_size = alloc_region->free() / HeapWordSize;
  68 
  69   // This is the minimum free chunk we can turn into a dummy
  70  // object. If the free space falls below this, then no one can
  71   // allocate in this region anyway (all allocation requests will be
  72   // of a size larger than this) so we won't have to perform the dummy
  73   // allocation.
  74   size_t min_word_size_to_fill = CollectedHeap::min_fill_size();
  75 
  76   while (free_word_size >= min_word_size_to_fill) {
  77     HeapWord* dummy = par_allocate(alloc_region, free_word_size);
  78     if (dummy != NULL) {
  79       // If the allocation was successful we should fill in the space.
  80       CollectedHeap::fill_with_object(dummy, free_word_size);
  81       alloc_region->set_pre_dummy_top(dummy);
  82       result += free_word_size * HeapWordSize;
  83       break;
  84     }
  85 
  86     free_word_size = alloc_region->free() / HeapWordSize;
  87     // It's also possible that someone else beats us to the
  88     // allocation and fills up the region. In that case, we can
  89     // just get out of the loop.
  90   }
  91   result += alloc_region->free();
  92 
  93   assert(alloc_region->free() / HeapWordSize < min_word_size_to_fill,
  94          "post-condition");
  95   return result;
  96 }
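The retire protocol above works because concurrent allocators can only shrink the region's free space: a maximal CAS allocation either succeeds (and the tail is then filled with a dummy object) or fails because another thread took part of the tail, in which case the retry targets an even smaller remainder. Below is a minimal standalone sketch of the same idea using a std::atomic bump pointer; ToyRegion, par_allocate and the sizes are illustrative stand-ins, not HotSpot APIs.

    #include <atomic>
    #include <cstddef>
    #include <cstdio>

    // Illustrative stand-in for a region with a CAS bump-pointer allocator.
    struct ToyRegion {
      static const size_t kSize = 1024;     // capacity, in words
      std::atomic<size_t> top{0};           // index of the next free word

      size_t free_words() const { return kSize - top.load(); }

      // Returns the old top on success, kSize as a failure sentinel.
      size_t par_allocate(size_t words) {
        size_t cur = top.load();
        while (cur + words <= kSize) {
          if (top.compare_exchange_weak(cur, cur + words)) {
            return cur;
          }
          // compare_exchange_weak reloaded cur; re-test and retry.
        }
        return kSize;
      }
    };

    // Retire: grab the whole remaining tail with one maximal allocation so
    // that no other thread can allocate out of this region afterwards.
    size_t fill_up_remaining_space(ToyRegion& r, size_t min_fill_words) {
      size_t free_words = r.free_words();
      while (free_words >= min_fill_words) {
        if (r.par_allocate(free_words) != ToyRegion::kSize) {
          // We own the tail; a real collector would format it as a dummy object.
          return free_words;
        }
        free_words = r.free_words();  // someone else got in first; try the smaller tail
      }
      return 0;  // tail is too small for any allocation request anyway
    }

    int main() {
      ToyRegion r;
      r.par_allocate(1000);  // simulate earlier allocations
      printf("filled %zu words\n", fill_up_remaining_space(r, 2));
      return 0;
    }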
  97 
  98 size_t G1AllocRegion::retire_internal(HeapRegion* alloc_region, bool fill_up) {
  99   // We never have to check whether the active region is empty or not,
 100   // and potentially free it if it is, given that it's guaranteed that


 362   if (retired != NULL) {
 363     _stats->add_region_end_waste(end_waste / HeapWordSize);
 364   }
 365   return end_waste;
 366 }
 367 
 368 HeapRegion* OldGCAllocRegion::release() {
 369   HeapRegion* cur = get();
 370   if (cur != NULL) {
 371     // Determine how far we are from the next card boundary. If it is smaller than
 372     // the minimum object size we can allocate into, expand into the next card.
 373     HeapWord* top = cur->top();
 374     HeapWord* aligned_top = align_up(top, BOTConstants::N_bytes);
 375 
 376     size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize);
 377 
 378     if (to_allocate_words != 0) {
 380      // We are not at a card boundary. Fill up, possibly into the next card, taking the
 380       // end of the region and the minimum object size into account.
 381       to_allocate_words = MIN2(pointer_delta(cur->end(), cur->top(), HeapWordSize),
 382                                MAX2(to_allocate_words, G1CollectedHeap::min_fill_size()));
 383 
 384       // Skip allocation if there is not enough space to allocate even the smallest
 385       // possible object. In this case this region will not be retained, so the
 386       // original problem cannot occur.
 387       if (to_allocate_words >= G1CollectedHeap::min_fill_size()) {
 388         HeapWord* dummy = attempt_allocation(to_allocate_words);
 389         CollectedHeap::fill_with_object(dummy, to_allocate_words);
 390       }
 391     }
 392   }
 393   return G1AllocRegion::release();
 394 }

New file (after change):

   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/g1/g1AllocRegion.inline.hpp"
  27 #include "gc/g1/g1EvacStats.inline.hpp"
  28 #include "gc/g1/g1CollectedHeap.inline.hpp"
  29 #include "gc/shared/fill.hpp"
  30 #include "logging/log.hpp"
  31 #include "logging/logStream.hpp"
  32 #include "memory/resourceArea.hpp"
  33 #include "runtime/orderAccess.hpp"
  34 #include "utilities/align.hpp"
  35 
  36 G1CollectedHeap* G1AllocRegion::_g1h = NULL;
  37 HeapRegion* G1AllocRegion::_dummy_region = NULL;
  38 
  39 void G1AllocRegion::setup(G1CollectedHeap* g1h, HeapRegion* dummy_region) {
  40   assert(_dummy_region == NULL, "should be set once");
  41   assert(dummy_region != NULL, "pre-condition");
  42   assert(dummy_region->free() == 0, "pre-condition");
  43 
  44   // Make sure that any allocation attempt on this region will fail
  45   // and will not trigger any asserts.
  46   assert(dummy_region->allocate_no_bot_updates(1) == NULL, "should fail");
  47   assert(dummy_region->allocate(1) == NULL, "should fail");
  48   DEBUG_ONLY(size_t assert_tmp);
  49   assert(dummy_region->par_allocate_no_bot_updates(1, 1, &assert_tmp) == NULL, "should fail");


  55 
  56 size_t G1AllocRegion::fill_up_remaining_space(HeapRegion* alloc_region) {
  57   assert(alloc_region != NULL && alloc_region != _dummy_region,
  58          "pre-condition");
  59   size_t result = 0;
  60 
  61   // Other threads might still be trying to allocate using a CAS out
  62   // of the region we are trying to retire, as they can do so without
  63  // holding the lock. So, we first have to make sure that no one else
  64   // can allocate out of it by doing a maximal allocation. Even if our
  65   // CAS attempt fails a few times, we'll succeed sooner or later
  66   // given that failed CAS attempts mean that the region is getting
  67  // close to being full.
  68   size_t free_word_size = alloc_region->free() / HeapWordSize;
  69 
  70   // This is the minimum free chunk we can turn into a dummy
  71  // object. If the free space falls below this, then no one can
  72   // allocate in this region anyway (all allocation requests will be
  73   // of a size larger than this) so we won't have to perform the dummy
  74   // allocation.
  75   size_t min_word_size_to_fill = Fill::min_size();
  76 
  77   while (free_word_size >= min_word_size_to_fill) {
  78     HeapWord* dummy = par_allocate(alloc_region, free_word_size);
  79     if (dummy != NULL) {
  80       // If the allocation was successful we should fill in the space.
  81       Fill::range(dummy, free_word_size);
  82       alloc_region->set_pre_dummy_top(dummy);
  83       result += free_word_size * HeapWordSize;
  84       break;
  85     }
  86 
  87     free_word_size = alloc_region->free() / HeapWordSize;
  88     // It's also possible that someone else beats us to the
  89     // allocation and fills up the region. In that case, we can
  90     // just get out of the loop.
  91   }
  92   result += alloc_region->free();
  93 
  94   assert(alloc_region->free() / HeapWordSize < min_word_size_to_fill,
  95          "post-condition");
  96   return result;
  97 }
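The substantive change in this file is mechanical: the filler-object helpers move from CollectedHeap::min_fill_size() and CollectedHeap::fill_with_object() to the Fill class declared in the newly included gc/shared/fill.hpp, used above as Fill::min_size() and Fill::range(start, words) with the call shapes otherwise unchanged. The toy facade below only makes that calling convention concrete; the minimum size of 2 words and the filler encoding are invented for the example and are not the HotSpot implementation.

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    typedef unsigned long HeapWord;          // illustrative stand-in, one word per slot

    // Hypothetical facade in the spirit of gc/shared/fill.hpp; everything in
    // the bodies below is made up for the example.
    class Fill {
     public:
      static size_t min_size() { return 2; } // smallest fillable gap, in words

      static void range(HeapWord* start, size_t words) {
        assert(words >= min_size());
        start[0] = 0xF111ED;                 // pretend "filler object" header
        start[1] = words;                    // pretend length field
        for (size_t i = 2; i < words; i++) {
          start[i] = 0;
        }
      }
    };

    int main() {
      HeapWord gap[8];
      if (8 >= Fill::min_size()) {
        Fill::range(gap, 8);                 // plug the unused tail of a region
      }
      printf("filled %zu words\n", (size_t)gap[1]);
      return 0;
    }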
  98 
  99 size_t G1AllocRegion::retire_internal(HeapRegion* alloc_region, bool fill_up) {
 100   // We never have to check whether the active region is empty or not,
 101   // and potentially free it if it is, given that it's guaranteed that


 363   if (retired != NULL) {
 364     _stats->add_region_end_waste(end_waste / HeapWordSize);
 365   }
 366   return end_waste;
 367 }
 368 
 369 HeapRegion* OldGCAllocRegion::release() {
 370   HeapRegion* cur = get();
 371   if (cur != NULL) {
 372     // Determine how far we are from the next card boundary. If it is smaller than
 373     // the minimum object size we can allocate into, expand into the next card.
 374     HeapWord* top = cur->top();
 375     HeapWord* aligned_top = align_up(top, BOTConstants::N_bytes);
 376 
 377     size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize);
 378 
 379     if (to_allocate_words != 0) {
 381      // We are not at a card boundary. Fill up, possibly into the next card, taking the
 381       // end of the region and the minimum object size into account.
 382       to_allocate_words = MIN2(pointer_delta(cur->end(), cur->top(), HeapWordSize),
 383                                MAX2(to_allocate_words, Fill::min_size()));
 384 
 385       // Skip allocation if there is not enough space to allocate even the smallest
 386       // possible object. In this case this region will not be retained, so the
 387       // original problem cannot occur.
 388       if (to_allocate_words >= Fill::min_size()) {
 389         HeapWord* dummy = attempt_allocation(to_allocate_words);
 390         Fill::range(dummy, to_allocate_words);
 391       }
 392     }
 393   }
 394   return G1AllocRegion::release();
 395 }
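To make the padding arithmetic in OldGCAllocRegion::release() concrete: with, say, 512-byte cards and 8-byte HeapWords, a top that sits 24 bytes past a card boundary is 488 bytes, i.e. 61 words, short of the next boundary, so 61 words are filled; a top only one word short of a boundary is rounded up to the minimum object size by the MAX2 clamp, spilling into the next card, while the MIN2 clamp keeps the pad inside the region. The card size, word size and minimum fill size below are assumptions chosen for the example, not values read from the VM.

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    // Example parameters only; not taken from BOTConstants, HeapWordSize or Fill.
    static const size_t kCardBytes    = 512;  // bytes per card
    static const size_t kWordBytes    = 8;    // bytes per HeapWord
    static const size_t kMinFillWords = 2;    // smallest fillable object, in words

    // Words to pad so that top lands on a card boundary (or just past one,
    // when the gap is smaller than the minimum object size).
    size_t pad_words(size_t top_bytes, size_t end_bytes) {
      size_t aligned_top = (top_bytes + kCardBytes - 1) / kCardBytes * kCardBytes;
      size_t to_allocate = (aligned_top - top_bytes) / kWordBytes;
      if (to_allocate == 0) {
        return 0;                                         // already card aligned
      }
      size_t room = (end_bytes - top_bytes) / kWordBytes;  // words left in the region
      to_allocate = std::min(room, std::max(to_allocate, kMinFillWords));
      // If even the smallest object does not fit, skip the fill entirely.
      return (to_allocate >= kMinFillWords) ? to_allocate : 0;
    }

    int main() {
      printf("%zu\n", pad_words(1024 + 24, 32768)); // 24 bytes past a boundary -> 61 words
      printf("%zu\n", pad_words(1024 - 8,  32768)); // 1 word short -> clamped up to 2 words
      printf("%zu\n", pad_words(2048,      32768)); // already on a boundary -> 0 words
      return 0;
    }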