src/share/vm/memory/space.inline.hpp

rev 6796 : [mq]: templateOopIterate


 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_SPACE_INLINE_HPP
#define SHARE_VM_MEMORY_SPACE_INLINE_HPP

#include "gc_interface/collectedHeap.hpp"
#include "oops/oop.inline.hpp"
#include "memory/space.hpp"
#include "memory/universe.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/safepoint.hpp"

inline HeapWord* Space::block_start(const void* p) {
  return block_start_const(p);
}

#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) {            \
  /* Compute the new addresses for the live objects and store them in the    \
   * mark word. Used by universe::mark_sweep_phase2().                       \
   */                                                                        \
  HeapWord* compact_top; /* This is where we are currently compacting to. */ \
                                                                             \
  /* We're sure to be here before any objects are compacted into this        \
   * space, so this is a good time to initialize this:                       \
   */                                                                        \
  set_compaction_top(bottom());                                              \
                                                                             \


inline HeapWord* OffsetTableContigSpace::par_allocate(size_t size) {
  MutexLocker x(&_par_alloc_lock);
  // This ought to be just "allocate", because of the lock above, but
  // ContiguousSpace::allocate asserts that either the allocating thread
  // holds the heap lock or it is the VM thread and we're at a safepoint.
  // The best I (dld) could figure was to put a field in ContiguousSpace
  // meaning "locking at safepoint taken care of", and set/reset it here.
  // But this will do for now, especially in light of the comment above.
  // Perhaps in the future some lock-free way of keeping the coordination
  // can be found.
  HeapWord* res = ContiguousSpace::par_allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}
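// For reference, a sketch of the kind of assertion in
// ContiguousSpace::allocate that the comment above refers to; the exact
// condition in space.cpp may differ:
//
//   assert(Heap_lock->owned_by_self() ||
//          (SafepointSynchronize::is_at_safepoint() &&
//           Thread::current()->is_VM_thread()),
//          "not locked");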

inline HeapWord*
OffsetTableContigSpace::block_start_const(const void* p) const {
  return _offsets.block_start(p);
}

#ifndef SERIALGC
// Apply blk to the oop (pointer) fields of every object in mr; oop_iterate
// returns each object's size, which advances the scan cursor. The nv
// template parameter selects statically bound (non-virtual) dispatch of
// the closure's callbacks when the concrete closure type supports it.
template <bool nv, typename OopClosureType>
void ContiguousSpace::par_oop_iterate(MemRegion mr, OopClosureType* blk) {
  HeapWord* obj_addr = mr.start();
  HeapWord* t = mr.end();
  while (obj_addr < t) {
    assert(oop(obj_addr)->is_oop(), "Should be an oop");
    obj_addr += oop(obj_addr)->oop_iterate<nv>(blk);
  }
}
#endif // SERIALGC

#endif // SHARE_VM_MEMORY_SPACE_INLINE_HPP
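
A minimal usage sketch for the new par_oop_iterate template. The point of the
nv parameter is that, when the concrete closure type is known at compile time,
oop_iterate<nv> can bind the per-oop callbacks statically instead of through a
virtual call. CountOopClosure and count_oops below are hypothetical
illustrations, not part of this change:

// Hypothetical closure: counts the oop fields visited in a region.
class CountOopClosure : public ExtendedOopClosure {
  size_t _count;
 public:
  CountOopClosure() : _count(0) {}
  virtual void do_oop(oop* p)       { _count++; }
  virtual void do_oop(narrowOop* p) { _count++; }
  size_t count() const { return _count; }
};

void count_oops(ContiguousSpace* space, MemRegion mr) {
  CountOopClosure cl;
  // nv == false keeps ordinary virtual do_oop dispatch; a closure whose
  // callbacks can be bound statically would pass true to let the template
  // devirtualize the per-oop calls.
  space->par_oop_iterate<false>(mr, &cl);
}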