1 /*
   2  * Copyright (c) 2000, 2006, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
// Non-const convenience wrapper: simply delegates to block_start_const.
inline HeapWord* Space::block_start(const void* p) {
  return block_start_const(p);
}
  28 
  29 inline HeapWord* OffsetTableContigSpace::allocate(size_t size) {
  30   HeapWord* res = ContiguousSpace::allocate(size);
  31   if (res != NULL) {
  32     _offsets.alloc_block(res, size);
  33   }
  34   return res;
  35 }
  36 
  37 // Because of the requirement of keeping "_offsets" up to date with the
  38 // allocations, we sequentialize these with a lock.  Therefore, best if
  39 // this is used for larger LAB allocations only.
  40 inline HeapWord* OffsetTableContigSpace::par_allocate(size_t size) {
  41   MutexLocker x(&_par_alloc_lock);
  42   // This ought to be just "allocate", because of the lock above, but that
  43   // ContiguousSpace::allocate asserts that either the allocating thread
  44   // holds the heap lock or it is the VM thread and we're at a safepoint.
  45   // The best I (dld) could figure was to put a field in ContiguousSpace
  46   // meaning "locking at safepoint taken care of", and set/reset that
  47   // here.  But this will do for now, especially in light of the comment
  48   // above.  Perhaps in the future some lock-free manner of keeping the
  49   // coordination.
  50   HeapWord* res = ContiguousSpace::par_allocate(size);
  51   if (res != NULL) {
  52     _offsets.alloc_block(res, size);
  53   }
  54   return res;
  55 }
  56 
// Answer a block-start query by consulting the block offset table
// (kept up to date by allocate/par_allocate via alloc_block).
inline HeapWord*
OffsetTableContigSpace::block_start_const(const void* p) const {
  return _offsets.block_start(p);
}
  61 
  62 inline HeapWord* ContiguousSpace::concurrent_iteration_safe_limit()
  63 {
  64   assert(_concurrent_iteration_safe_limit <= top(),
  65          "_concurrent_iteration_safe_limit update missed");
  66   return _concurrent_iteration_safe_limit;
  67 }
  68 
// Install a new safe limit for concurrent iteration.  The new limit
// must not exceed top(): the asserted condition guards against exposing
// the uninitialized region beyond top() to concurrent iterators.
inline void ContiguousSpace::set_concurrent_iteration_safe_limit(HeapWord* new_limit)
{
  assert(new_limit <= top(), "uninitialized objects in the safe range");
  _concurrent_iteration_safe_limit = new_limit;
}