src/hotspot/share/gc/shared/space.inline.hpp

--- old/src/hotspot/share/gc/shared/space.inline.hpp

   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_SHARED_SPACE_INLINE_HPP
  26 #define SHARE_VM_GC_SHARED_SPACE_INLINE_HPP
  27 
  28 #include "gc/serial/markSweep.inline.hpp"
  29 #include "gc/shared/collectedHeap.hpp"
  30 #include "gc/shared/generation.hpp"
  31 #include "gc/shared/space.hpp"
  32 #include "gc/shared/spaceDecorator.hpp"
  33 #include "memory/universe.hpp"
  34 #include "oops/oopsHierarchy.hpp"
  35 #include "oops/oop.inline.hpp"
  36 #include "runtime/prefetch.inline.hpp"
  37 #include "runtime/safepoint.hpp"



  38 
  39 inline HeapWord* Space::block_start(const void* p) {
  40   return block_start_const(p);
  41 }
  42 
  43 inline HeapWord* OffsetTableContigSpace::allocate(size_t size) {
  44   HeapWord* res = ContiguousSpace::allocate(size);
  45   if (res != NULL) {
  46     _offsets.alloc_block(res, size);
  47   }
  48   return res;
  49 }
  50 
  51 // Because of the requirement of keeping "_offsets" up to date with the
  52 // allocations, we sequentialize these with a lock.  Therefore, best if
  53 // this is used for larger LAB allocations only.
  54 inline HeapWord* OffsetTableContigSpace::par_allocate(size_t size) {
  55   MutexLocker x(&_par_alloc_lock);
  56   // This ought to be just "allocate", because of the lock above, but that
  57   // ContiguousSpace::allocate asserts that either the allocating thread


  60   // meaning "locking at safepoint taken care of", and set/reset that
  61   // here.  But this will do for now, especially in light of the comment
  62   // above.  Perhaps in the future some lock-free manner of keeping the
  63   // coordination.
  64   HeapWord* res = ContiguousSpace::par_allocate(size);
  65   if (res != NULL) {
  66     _offsets.alloc_block(res, size);
  67   }
  68   return res;
  69 }
  70 
  71 inline HeapWord*
  72 OffsetTableContigSpace::block_start_const(const void* p) const {
  73   return _offsets.block_start(p);
  74 }
  75 
  76 size_t CompactibleSpace::obj_size(const HeapWord* addr) const {
  77   return oop(addr)->size();
  78 }
  79 


  80 class DeadSpacer : StackObj {
  81   size_t _allowed_deadspace_words;
  82   bool _active;
  83   CompactibleSpace* _space;
  84 
  85 public:
  86   DeadSpacer(CompactibleSpace* space) : _space(space), _allowed_deadspace_words(0) {
  87     size_t ratio = _space->allowed_dead_ratio();
  88     _active = ratio > 0;
  89 
  90     if (_active) {
  91       assert(!UseG1GC, "G1 should not be using dead space");
  92 
  93       // We allow some amount of garbage towards the bottom of the space, so
  94       // we don't start compacting before there is a significant gain to be made.
  95       // Occasionally, we want to ensure a full compaction, which is determined
  96       // by the MarkSweepAlwaysCompactCount parameter.
  97       if ((MarkSweep::total_invocations() % MarkSweepAlwaysCompactCount) != 0) {
  98         _allowed_deadspace_words = (space->capacity() * ratio / 100) / HeapWordSize;
  99       } else {


 329       // size and destination
 330       size_t size = space->obj_size(cur_obj);
 331       HeapWord* compaction_top = (HeapWord*)oop(cur_obj)->forwardee();
 332 
 333       // prefetch beyond compaction_top
 334       Prefetch::write(compaction_top, copy_interval);
 335 
 336       // copy object and reinit its mark
 337       assert(cur_obj != compaction_top, "everything in this pass should be moving");
 338       Copy::aligned_conjoint_words(cur_obj, compaction_top, size);
 339       oop(compaction_top)->init_mark_raw();
 340       assert(oop(compaction_top)->klass() != NULL, "should have a class");
 341 
 342       debug_only(prev_obj = cur_obj);
 343       cur_obj += size;
 344     }
 345   }
 346 
 347   clear_empty_region(space);
 348 }


 349 
 350 size_t ContiguousSpace::scanned_block_size(const HeapWord* addr) const {
 351   return oop(addr)->size();
 352 }
 353 
  354 #endif // SHARE_VM_GC_SHARED_SPACE_INLINE_HPP

+++ new/src/hotspot/share/gc/shared/space.inline.hpp

   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_SHARED_SPACE_INLINE_HPP
  26 #define SHARE_VM_GC_SHARED_SPACE_INLINE_HPP
  27 

  28 #include "gc/shared/collectedHeap.hpp"
  29 #include "gc/shared/generation.hpp"
  30 #include "gc/shared/space.hpp"
  31 #include "gc/shared/spaceDecorator.hpp"
  32 #include "memory/universe.hpp"
  33 #include "oops/oopsHierarchy.hpp"
  34 #include "oops/oop.inline.hpp"
  35 #include "runtime/prefetch.inline.hpp"
  36 #include "runtime/safepoint.hpp"
  37 #if INCLUDE_SERIALGC
  38 #include "gc/serial/markSweep.inline.hpp"
  39 #endif
  40 
  41 inline HeapWord* Space::block_start(const void* p) {
  42   return block_start_const(p);
  43 }
  44 
  45 inline HeapWord* OffsetTableContigSpace::allocate(size_t size) {
  46   HeapWord* res = ContiguousSpace::allocate(size);
  47   if (res != NULL) {
  48     _offsets.alloc_block(res, size);
  49   }
  50   return res;
  51 }
  52 
  53 // Because of the requirement of keeping "_offsets" up to date with the
  54 // allocations, we sequentialize these with a lock.  Therefore, best if
  55 // this is used for larger LAB allocations only.
  56 inline HeapWord* OffsetTableContigSpace::par_allocate(size_t size) {
  57   MutexLocker x(&_par_alloc_lock);
  58   // This ought to be just "allocate", because of the lock above, but that
  59   // ContiguousSpace::allocate asserts that either the allocating thread


  62   // meaning "locking at safepoint taken care of", and set/reset that
  63   // here.  But this will do for now, especially in light of the comment
  64   // above.  Perhaps in the future some lock-free manner of keeping the
  65   // coordination.
  66   HeapWord* res = ContiguousSpace::par_allocate(size);
  67   if (res != NULL) {
  68     _offsets.alloc_block(res, size);
  69   }
  70   return res;
  71 }
  72 
  73 inline HeapWord*
  74 OffsetTableContigSpace::block_start_const(const void* p) const {
  75   return _offsets.block_start(p);
  76 }
  77 
  78 size_t CompactibleSpace::obj_size(const HeapWord* addr) const {
  79   return oop(addr)->size();
  80 }
  81 
  82 #if INCLUDE_SERIALGC
  83 
  84 class DeadSpacer : StackObj {
  85   size_t _allowed_deadspace_words;
  86   bool _active;
  87   CompactibleSpace* _space;
  88 
  89 public:
  90   DeadSpacer(CompactibleSpace* space) : _space(space), _allowed_deadspace_words(0) {
  91     size_t ratio = _space->allowed_dead_ratio();
  92     _active = ratio > 0;
  93 
  94     if (_active) {
  95       assert(!UseG1GC, "G1 should not be using dead space");
  96 
  97       // We allow some amount of garbage towards the bottom of the space, so
  98       // we don't start compacting before there is a significant gain to be made.
  99       // Occasionally, we want to ensure a full compaction, which is determined
 100       // by the MarkSweepAlwaysCompactCount parameter.
 101       if ((MarkSweep::total_invocations() % MarkSweepAlwaysCompactCount) != 0) {
 102         _allowed_deadspace_words = (space->capacity() * ratio / 100) / HeapWordSize;
 103       } else {


 333       // size and destination
 334       size_t size = space->obj_size(cur_obj);
 335       HeapWord* compaction_top = (HeapWord*)oop(cur_obj)->forwardee();
 336 
 337       // prefetch beyond compaction_top
 338       Prefetch::write(compaction_top, copy_interval);
 339 
 340       // copy object and reinit its mark
 341       assert(cur_obj != compaction_top, "everything in this pass should be moving");
 342       Copy::aligned_conjoint_words(cur_obj, compaction_top, size);
 343       oop(compaction_top)->init_mark_raw();
 344       assert(oop(compaction_top)->klass() != NULL, "should have a class");
 345 
 346       debug_only(prev_obj = cur_obj);
 347       cur_obj += size;
 348     }
 349   }
 350 
 351   clear_empty_region(space);
 352 }
 353 
 354 #endif // INCLUDE_SERIALGC
 355 
 356 size_t ContiguousSpace::scanned_block_size(const HeapWord* addr) const {
 357   return oop(addr)->size();
 358 }
 359 
 360 #endif // SHARE_VM_GC_SHARED_SPACE_INLINE_HPP
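
For context, the change above fences the serial-GC-only pieces of this shared header (the gc/serial/markSweep.inline.hpp include and the DeadSpacer / compaction helpers) behind #if INCLUDE_SERIALGC, so the file still compiles when Serial GC is excluded from the build. The sketch below shows that pattern in isolation; the fallback #define and the AllowedDeadSpace class are illustrative assumptions for this sketch, not the actual HotSpot definitions.

// Minimal, self-contained sketch of the guard pattern applied in the change:
// code needed only by the Serial GC is compiled out entirely when Serial GC
// is not part of the build, so other collectors never pay for it and never
// pull in its dependencies.
#include <cstddef>

#ifndef INCLUDE_SERIALGC
#define INCLUDE_SERIALGC 1   // normally provided by the build; defined here
#endif                       // only so the sketch compiles on its own

#if INCLUDE_SERIALGC
// Illustrative stand-in for the DeadSpacer bookkeeping above: a
// capacity-based budget of dead words that may be left in place instead of
// being compacted away.
class AllowedDeadSpace {
  size_t _allowed_deadspace_words;
public:
  AllowedDeadSpace(size_t capacity_bytes, size_t ratio_percent, size_t heap_word_size)
    : _allowed_deadspace_words(capacity_bytes * ratio_percent / 100 / heap_word_size) {}

  bool insert_deadspace(size_t dead_words) {
    if (dead_words <= _allowed_deadspace_words) {
      _allowed_deadspace_words -= dead_words;
      return true;             // leave the dead range in place
    }
    _allowed_deadspace_words = 0;
    return false;              // budget exhausted; the object must be moved
  }
};
#endif // INCLUDE_SERIALGC

Guarding at compile time keeps this bookkeeping (and its dependency on MarkSweep) out of builds that do not ship the serial collector, rather than testing a runtime flag on every compaction pass.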