/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP

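// Allocate a block of the given size (in HeapWords) in this space and,
// on success, record the new block in the block offset table ("_offsets")
// so that block_start() queries over this region resolve correctly.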
inline HeapWord* G1OffsetTableContigSpace::allocate(size_t size) {
  HeapWord* res = ContiguousSpace::allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}

// Because of the requirement of keeping "_offsets" up to date with the
// allocations, we sequentialize these with a lock.  Therefore, this is
// best used for larger LAB allocations only.
inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t size) {
  MutexLocker x(&_par_alloc_lock);
  // Given that we hold the lock, there is no need to use par_allocate() here.
  HeapWord* res = ContiguousSpace::allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}
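// Find the start of the (object) block containing the address p by
// consulting the region's block offset table.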
inline HeapWord* G1OffsetTableContigSpace::block_start(const void* p) {
  return _offsets.block_start(p);
}

inline HeapWord*
G1OffsetTableContigSpace::block_start_const(const void* p) const {
  return _offsets.block_start_const(p);
}
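// Concurrent marking support. Each region tracks two "top at mark start"
// (TAMS) pointers: NTAMS for the marking cycle in progress and PTAMS for
// the previous, completed one. Objects allocated above TAMS are regarded
// as implicitly live; liveness below TAMS is determined by the mark bitmaps.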
inline void HeapRegion::note_start_of_marking() {
  _next_marked_bytes = 0;
  _next_top_at_mark_start = top();
}

inline void HeapRegion::note_end_of_marking() {
  _prev_top_at_mark_start = _next_top_at_mark_start;
  _prev_marked_bytes = _next_marked_bytes;
  _next_marked_bytes = 0;

  assert(_prev_marked_bytes <=
         (size_t) pointer_delta(prev_top_at_mark_start(), bottom()) *
         HeapWordSize, "invariant");
}
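// Evacuation support. These hooks bracket the use of this region as a GC
// allocation region: note_start_of_copying() when copying into the region
// begins, note_end_of_copying() when the region is retired (see the comment
// below about setting NTAMS to the actual top at retirement).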
inline void HeapRegion::note_start_of_copying(bool during_initial_mark) {
  if (is_survivor()) {
    // This is how we always allocate survivors.
    assert(_next_top_at_mark_start == bottom(), "invariant");
  } else {
    if (during_initial_mark) {
      // During initial-mark we'll explicitly mark any objects on old
      // regions that are pointed to by roots. Given that explicit
      // marks only make sense below NTAMS, it would be nice to be
      // able to check that condition. However, we don't know where
      // the top of this region will end up, so we simply set NTAMS
      // to the end of the region so that all marks fall below NTAMS.
      // We'll set it to the actual top when we retire this region.
      _next_top_at_mark_start = end();
    } else {
      // We could have re-used this old region as to-space over a
      // couple of GCs since the start of the concurrent marking
      // cycle. This means that [bottom, NTAMS) will contain objects
      // copied up to and including the initial-mark pause, and
      // [NTAMS, top) will contain objects copied during the
      // concurrent marking cycle.
      assert(top() >= _next_top_at_mark_start, "invariant");
    }
  }
}

inline void HeapRegion::note_end_of_copying(bool during_initial_mark) {
  if (is_survivor()) {
    // This is how we always allocate survivors.
    assert(_next_top_at_mark_start == bottom(), "invariant");
  } else {
    if (during_initial_mark) {
      // See the comment for note_start_of_copying() for the details
      // on this.
      assert(_next_top_at_mark_start == end(), "pre-condition");
      _next_top_at_mark_start = top();
    } else {
      // See the comment for note_start_of_copying() for the details
      // on this.
      assert(top() >= _next_top_at_mark_start, "invariant");
    }
  }
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP