/*
 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_SPACE_INLINE_HPP
#define SHARE_VM_MEMORY_SPACE_INLINE_HPP

#include "gc_implementation/shared/liveRange.hpp"
#include "gc_implementation/shared/markSweep.inline.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/space.hpp"
#include "memory/universe.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/safepoint.hpp"
inline HeapWord* Space::block_start(const void* p) {
  return block_start_const(p);
}

inline HeapWord* OffsetTableContigSpace::allocate(size_t size) {
  HeapWord* res = ContiguousSpace::allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}
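
// Note: the alloc_block() call above records the new allocation in the
// block offset table; that bookkeeping is what lets block_start_const()
// (further below) map an arbitrary interior pointer back to the start of
// the object that covers it.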

// Because of the requirement to keep "_offsets" up to date with the
// allocations, we serialize these with a lock.  It is therefore best if
// this is used for larger LAB allocations only.
inline HeapWord* OffsetTableContigSpace::par_allocate(size_t size) {
  MutexLocker x(&_par_alloc_lock);
  // This ought to be just "allocate", because of the lock above, but
  // ContiguousSpace::allocate asserts that either the allocating thread
  // holds the heap lock or it is the VM thread and we're at a safepoint.
  // The best I (dld) could figure out was to put a field in ContiguousSpace
  // meaning "locking at safepoint taken care of", and set/reset that
  // here.  But this will do for now, especially in light of the comment
  // above.  Perhaps in the future we can find some lock-free way of
  // keeping the coordination.
  HeapWord* res = ContiguousSpace::par_allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}
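
// A minimal usage sketch (hypothetical caller, not part of this file):
// several GC worker threads can carve thread-local allocation buffers out
// of the same space, paying for the lock above only once per buffer.
//
//   HeapWord* lab = space->par_allocate(lab_word_size);
//   if (lab != NULL) {
//     // hand [lab, lab + lab_word_size) to one worker thread, which then
//     // bump-allocates objects within it with no further locking
//   }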

inline HeapWord*
OffsetTableContigSpace::block_start_const(const void* p) const {
  return _offsets.block_start(p);
}

template <class SpaceType>
inline void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp) {
  // Compute the new addresses for the live objects and store them in the
  // mark word of each object.
  // Used by MarkSweep::mark_sweep_phase2()
  HeapWord* compact_top; // This is where we are currently compacting to.

  // We're sure to be here before any objects are compacted into this
  // space, so this is a good time to initialize this:
  space->set_compaction_top(space->bottom());

  if (cp->space == NULL) {
    assert(cp->gen != NULL, "need a generation");
    assert(cp->threshold == NULL, "just checking");
    assert(cp->gen->first_compaction_space() == space, "just checking");
    cp->space = cp->gen->first_compaction_space();
    compact_top = cp->space->bottom();
    cp->space->set_compaction_top(compact_top);
    cp->threshold = cp->space->initialize_threshold();
  } else {
    compact_top = cp->space->compaction_top();
  }

  // We allow some amount of garbage towards the bottom of the space, so
  // we don't start compacting before there is a significant gain to be made.
  // Occasionally, we want to ensure a full compaction, which is determined
  // by the MarkSweepAlwaysCompactCount parameter.
  uint invocations = MarkSweep::total_invocations();
  bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0);

  size_t allowed_deadspace = 0;
  if (skip_dead) {
    const size_t ratio = space->allowed_dead_ratio();
    allowed_deadspace = (space->capacity() * ratio / 100) / HeapWordSize;
  }
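
  // Worked example (illustrative numbers, not defaults): with
  // MarkSweepAlwaysCompactCount == 4, every fourth invocation performs a
  // full compaction (skip_dead is false) and retains no dead space.  On
  // the other invocations, a 64 MB space with an allowed_dead_ratio of 5
  // and an 8-byte HeapWord may keep up to (64 MB * 5 / 100) / 8 =
  // 419,430 words (about 3.2 MB) of dead space near the bottom of the
  // space rather than compacting it away.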

  HeapWord* q = space->bottom();
  HeapWord* t = space->scan_limit();

  HeapWord*  end_of_live = q;           // One byte beyond the last byte of the last
                                        // live object.
  HeapWord*  first_dead = space->end(); // The first dead object.
  LiveRange* liveRange  = NULL;         // The current live range, recorded in the
                                        // first header of the preceding free area.
  space->_first_dead = first_dead;

  const intx interval = PrefetchScanIntervalInBytes;

  while (q < t) {
    assert(!space->scanned_block_is_obj(q) ||
           oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() ||
           oop(q)->mark()->has_bias_pattern(),
           "these are the only valid states during a mark sweep");
    if (space->scanned_block_is_obj(q) && oop(q)->is_gc_marked()) {
      // prefetch beyond q
      Prefetch::write(q, interval);
      size_t size = space->scanned_block_size(q);
      compact_top = cp->space->forward(oop(q), size, cp, compact_top);
      q += size;
      end_of_live = q;
    } else {
      // run over all the contiguous dead objects
      HeapWord* end = q;
      do {
        // prefetch beyond end
        Prefetch::write(end, interval);
        end += space->scanned_block_size(end);
      } while (end < t && (!space->scanned_block_is_obj(end) || !oop(end)->is_gc_marked()));

      // see if we might want to pretend this object is alive so that
      // we don't have to compact quite as often.
      if (allowed_deadspace > 0 && q == compact_top) {
        size_t sz = pointer_delta(end, q);
        if (space->insert_deadspace(allowed_deadspace, q, sz)) {
          compact_top = cp->space->forward(oop(q), sz, cp, compact_top);
          q = end;
          end_of_live = end;
          continue;
        }
      }

      // otherwise, it really is a free region.

      // for the previous LiveRange, record the end of the live objects.
      if (liveRange) {
        liveRange->set_end(q);
      }

      // record the current LiveRange object.
      // liveRange->start() is overlaid on the mark word.
      liveRange = (LiveRange*)q;
      liveRange->set_start(end);
      liveRange->set_end(end);
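
      // At this point the LiveRange record occupies the first words of the
      // dead region starting at q, overlaying the dead object's mark word;
      // its start field points at `end`, the next live object (or the scan
      // limit).  Later phases read it back via
      // oop(q)->mark()->decode_pointer() to skip the dead region entirely.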

      // see if this is the first dead region.
      if (q < first_dead) {
        first_dead = q;
      }

      // move on to the next object
      q = end;
    }
  }

  assert(q == t, "just checking");
  if (liveRange != NULL) {
    liveRange->set_end(q);
  }
  space->_end_of_live = end_of_live;
  if (end_of_live < first_dead) {
    first_dead = end_of_live;
  }
  space->_first_dead = first_dead;

  // save the compaction_top of the compaction space.
  cp->space->set_compaction_top(compact_top);
}
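
// On return from scan_and_forward, the space satisfies the invariants that
// scan_and_adjust_pointers and scan_and_compact rely on:
//  - every live object's mark word encodes its forwarding address,
//  - space->_end_of_live is one past the last live object,
//  - space->_first_dead is the start of the first dead region (or
//    _end_of_live if there is none), and
//  - each dead gap between live runs begins with a LiveRange whose start
//    field points at the next live object.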

template <class SpaceType>
inline void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space) {
  // adjust all the interior pointers to point at the new locations of objects
  // Used by MarkSweep::mark_sweep_phase3()

  HeapWord* q = space->bottom();
  HeapWord* t = space->_end_of_live;  // Established by "prepare_for_compaction".

  assert(space->_first_dead <= space->_end_of_live, "Stands to reason, no?");

  if (q < t && space->_first_dead > q && !oop(q)->is_gc_marked()) {
    // we have a chunk of the space which hasn't moved and we've
    // reinitialized the mark word during the previous pass, so we can't
    // use is_gc_marked for the traversal.
    HeapWord* end = space->_first_dead;

    while (q < end) {
      // I originally tried to conjoin "block_start(q) == q" to the
      // assertion below, but that doesn't work, because you can't
      // accurately traverse previous objects to get to the current one
      // after their pointers have been updated, until the actual
      // compaction is done.  dld, 4/00
      assert(space->block_is_obj(q), "should be at block boundaries, and should be looking at objs");

      // point all the oops to the new location
      size_t size = oop(q)->adjust_pointers();
      size = space->adjust_obj_size(size);

      q += size;
    }

    if (space->_first_dead == t) {
      q = t;
    } else {
      // $$$ This is funky.  Using this to read the previously written
      // LiveRange: its start field overlays the mark word of the first
      // dead object, so decode_pointer() yields the address of the next
      // live object.  See also the use below.
      q = (HeapWord*)oop(space->_first_dead)->mark()->decode_pointer();
    }
  }

  const intx interval = PrefetchScanIntervalInBytes;

  debug_only(HeapWord* prev_q = NULL);
  while (q < t) {
    // prefetch beyond q
    Prefetch::write(q, interval);
    if (oop(q)->is_gc_marked()) {
      // q is alive
      // point all the oops to the new location
      size_t size = oop(q)->adjust_pointers();
      size = space->adjust_obj_size(size);
      debug_only(prev_q = q);
      q += size;
    } else {
      // q is not a live object, so its mark should point at the next
      // live object
      debug_only(prev_q = q);
      q = (HeapWord*) oop(q)->mark()->decode_pointer();
      assert(q > prev_q, "we should be moving forward through memory");
    }
  }

  assert(q == t, "just checking");
}

template <class SpaceType>
inline void CompactibleSpace::scan_and_compact(SpaceType* space) {
  // Copy all live objects to their new location
  // Used by MarkSweep::mark_sweep_phase4()

  HeapWord*       q = space->bottom();
  HeapWord* const t = space->_end_of_live;
  debug_only(HeapWord* prev_q = NULL);

  if (q < t && space->_first_dead > q && !oop(q)->is_gc_marked()) {
    #ifdef ASSERT // Debug only
      // we have a chunk of the space which hasn't moved and we've reinitialized
      // the mark word during the previous pass, so we can't use is_gc_marked for
      // the traversal.
      HeapWord* const end = space->_first_dead;

      while (q < end) {
        size_t size = space->obj_size(q);
        assert(!oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
        prev_q = q;
        q += size;
      }
    #endif

    if (space->_first_dead == t) {
      q = t;
    } else {
      // $$$ Funky.  As in scan_and_adjust_pointers, the mark word of the
      // first dead object holds the address of the next live one.
      q = (HeapWord*) oop(space->_first_dead)->mark()->decode_pointer();
    }
  }

  const intx scan_interval = PrefetchScanIntervalInBytes;
  const intx copy_interval = PrefetchCopyIntervalInBytes;
  while (q < t) {
    if (!oop(q)->is_gc_marked()) {
      // mark is pointer to next marked oop
      debug_only(prev_q = q);
      q = (HeapWord*) oop(q)->mark()->decode_pointer();
      assert(q > prev_q, "we should be moving forward through memory");
    } else {
      // prefetch beyond q
      Prefetch::read(q, scan_interval);

      // size and destination
      size_t size = space->obj_size(q);
      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();

      // prefetch beyond compaction_top
      Prefetch::write(compaction_top, copy_interval);

      // copy object and reinit its mark
      assert(q != compaction_top, "everything in this pass should be moving");
      Copy::aligned_conjoint_words(q, compaction_top, size);
      oop(compaction_top)->init_mark();
      assert(oop(compaction_top)->klass() != NULL, "should have a class");
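
      // Note: aligned_conjoint_words is used (rather than a disjoint copy)
      // because the source and destination ranges may overlap when an
      // object slides down within the same space by less than its own
      // size.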

      debug_only(prev_q = q);
      q += size;
    }
  }

  // Let's remember if we were empty before we did the compaction.
  bool was_empty = space->used_region().is_empty();
  // Reset space after compaction is complete
  space->reset_after_compaction();
  // We do this clear, below, since it has overloaded meanings for some
  // space subtypes.  For example, OffsetTableContigSpaces that were
  // compacted into will have had their offset table thresholds updated
  // continuously, but those that weren't need to have their thresholds
  // re-initialized.  Also mangles unused area for debugging.
  if (space->used_region().is_empty()) {
    if (!was_empty) space->clear(SpaceDecorator::Mangle);
  } else {
    if (ZapUnusedHeapArea) space->mangle_unused_area();
  }
}
#endif // SHARE_VM_MEMORY_SPACE_INLINE_HPP