1 #ifdef USE_PRAGMA_IDENT_HDR
   2 #pragma ident "@(#)space.inline.hpp     1.17 07/05/05 17:05:54 JVM"
   3 #endif
   4 /*
   5  * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
   6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   7  *
   8  * This code is free software; you can redistribute it and/or modify it
   9  * under the terms of the GNU General Public License version 2 only, as
  10  * published by the Free Software Foundation.
  11  *
  12  * This code is distributed in the hope that it will be useful, but WITHOUT
  13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  15  * version 2 for more details (a copy is included in the LICENSE file that
  16  * accompanied this code).
  17  *
  18  * You should have received a copy of the GNU General Public License version
  19  * 2 along with this work; if not, write to the Free Software Foundation,
  20  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  21  *
  22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  23  * CA 95054 USA or visit www.sun.com if you need additional information or
  24  * have any questions.
  25  *  
  26  */
  27 
  28 inline HeapWord* OffsetTableContigSpace::allocate(size_t size) {
  29   HeapWord* res = ContiguousSpace::allocate(size);
  30   if (res != NULL) {
  31     _offsets.alloc_block(res, size);
  32   }
  33   return res;
  34 }
  35 
  36 // Because of the requirement of keeping "_offsets" up to date with the
  37 // allocations, we sequentialize these with a lock.  Therefore, best if
  38 // this is used for larger LAB allocations only.
  39 inline HeapWord* OffsetTableContigSpace::par_allocate(size_t size) {
  40   MutexLocker x(&_par_alloc_lock);
  41   // This ought to be just "allocate", because of the lock above, but that
  42   // ContiguousSpace::allocate asserts that either the allocating thread
  43   // holds the heap lock or it is the VM thread and we're at a safepoint.
  44   // The best I (dld) could figure was to put a field in ContiguousSpace
  45   // meaning "locking at safepoint taken care of", and set/reset that
  46   // here.  But this will do for now, especially in light of the comment
  47   // above.  Perhaps in the future some lock-free manner of keeping the
  48   // coordination.
  49   HeapWord* res = ContiguousSpace::par_allocate(size);
  50   if (res != NULL) {
  51     _offsets.alloc_block(res, size);
  52   }
  53   return res;
  54 }
  55 
// Returns the start of the block containing address "p", as recorded in
// the block offset table.  Pure forwarding wrapper around
// BlockOffsetArray's lookup; performs no allocation or locking.
inline HeapWord* OffsetTableContigSpace::block_start(const void* p) const {
  return _offsets.block_start(p);
}
  59 
  60 inline HeapWord* ContiguousSpace::concurrent_iteration_safe_limit()
  61 {
  62   assert(_concurrent_iteration_safe_limit <= top(), 
  63          "_concurrent_iteration_safe_limit update missed");
  64   return _concurrent_iteration_safe_limit;
  65 }
  66 
  67 inline void ContiguousSpace::set_concurrent_iteration_safe_limit(HeapWord* new_limit)
  68 {
  69   assert(new_limit <= top(), "uninitialized objects in the safe range");
  70   _concurrent_iteration_safe_limit = new_limit;
  71 }