/*
 * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_SPACE_INLINE_HPP
#define SHARE_VM_GC_SHARED_SPACE_INLINE_HPP

#include "gc/serial/markSweep.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/generation.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "memory/universe.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/safepoint.hpp"

inline HeapWord* Space::block_start(const void* p) {
  return block_start_const(p);
}

inline HeapWord* OffsetTableContigSpace::allocate(size_t size) {
  HeapWord* res = ContiguousSpace::allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}

// Because "_offsets" must be kept up to date with allocations, we
// serialize allocations with a lock.  Therefore this is best used only
// for larger LAB allocations.
inline HeapWord* OffsetTableContigSpace::par_allocate(size_t size) {
  MutexLocker x(&_par_alloc_lock);
  // This ought to be just "allocate", because of the lock above, but
  // ContiguousSpace::allocate asserts that either the allocating thread
  // holds the heap lock or it is the VM thread and we're at a safepoint.
  // The best I (dld) could figure was to put a field in ContiguousSpace
  // meaning "locking at safepoint taken care of", and set/reset that
  // here.  But this will do for now, especially in light of the comment
  // above.  Perhaps in the future some lock-free means of coordination
  // can be used instead.
  HeapWord* res = ContiguousSpace::par_allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}

inline HeapWord*
OffsetTableContigSpace::block_start_const(const void* p) const {
  return _offsets.block_start(p);
}

inline size_t CompactibleSpace::obj_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

template <class SpaceType>
inline void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp) {
  // Compute the new addresses for the live objects and store them in the mark word.
  // Used by MarkSweep::mark_sweep_phase2().

  // We're sure to be here before any objects are compacted into this
  // space, so this is a good time to initialize this:
  space->set_compaction_top(space->bottom());

  if (cp->space == NULL) {
    assert(cp->gen != NULL, "need a generation");
    assert(cp->threshold == NULL, "just checking");
    assert(cp->gen->first_compaction_space() == space, "just checking");
    cp->space = cp->gen->first_compaction_space();
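    // For spaces with a block offset table, the threshold marks where the
    // table next needs updating as objects are forwarded into this space.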
    cp->threshold = cp->space->initialize_threshold();
    cp->space->set_compaction_top(cp->space->bottom());
  }

  HeapWord* compact_top = cp->space->compaction_top(); // This is where we are currently compacting to.

  // We allow some amount of garbage towards the bottom of the space, so
  // we don't start compacting before there is a significant gain to be made.
  // Occasionally, we want to ensure a full compaction, which is determined
  // by the MarkSweepAlwaysCompactCount parameter.
  uint invocations = MarkSweep::total_invocations();
  bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0);
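  // Every MarkSweepAlwaysCompactCount-th invocation skip_dead is false and the
  // space is compacted fully, leaving no dead space behind.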

  size_t allowed_deadspace = 0;
  if (skip_dead) {
    const size_t ratio = space->allowed_dead_ratio();
    allowed_deadspace = (space->capacity() * ratio / 100) / HeapWordSize;
  }
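  // Note that allowed_deadspace is measured in heap words (capacity() is in
  // bytes), matching the word-sized block sizes used below.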

  HeapWord*  end_of_live = space->bottom();  // One byte beyond the last byte of the last live object.
  HeapWord*  first_dead = NULL; // The first dead object.

  const intx interval = PrefetchScanIntervalInBytes;

  HeapWord* cur_obj = space->bottom();
  HeapWord* scan_limit = space->scan_limit();

  while (cur_obj < scan_limit) {
    assert(!space->scanned_block_is_obj(cur_obj) ||
           oop(cur_obj)->mark()->is_marked() || oop(cur_obj)->mark()->is_unlocked() ||
           oop(cur_obj)->mark()->has_bias_pattern(),
           "these are the only valid states during a mark sweep");
    if (space->scanned_block_is_obj(cur_obj) && oop(cur_obj)->is_gc_marked()) {
      // prefetch beyond cur_obj
      Prefetch::write(cur_obj, interval);
      size_t size = space->scanned_block_size(cur_obj);
      compact_top = cp->space->forward(oop(cur_obj), size, cp, compact_top);
      cur_obj += size;
      end_of_live = cur_obj;
    } else {
      // run over all the contiguous dead objects
      HeapWord* end = cur_obj;
      do {
        // prefetch beyond end
        Prefetch::write(end, interval);
        end += space->scanned_block_size(end);
      } while (end < scan_limit && (!space->scanned_block_is_obj(end) || !oop(end)->is_gc_marked()));

      // see if we might want to pretend this object is alive so that
      // we don't have to compact quite as often.
      if (allowed_deadspace > 0 && cur_obj == compact_top) {
        assert(!UseG1GC, "G1 should not be allowing dead space");
        size_t sz = pointer_delta(end, cur_obj);
        if (space->insert_deadspace(allowed_deadspace, cur_obj, sz)) {
          compact_top = cp->space->forward(oop(cur_obj), sz, cp, compact_top);
          cur_obj = end;
          end_of_live = end;
          continue;
        }
      }

      // otherwise, it really is a free region.

      // cur_obj is a pointer to a dead object. Use this dead memory to store a pointer to the next live object.
      *(HeapWord**)cur_obj = end;
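      // Note: this overwrites the dead object's first word (its mark word slot);
      // scan_and_adjust_pointers and scan_and_compact read it back to skip over
      // the dead region.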

      // see if this is the first dead region.
      if (first_dead == NULL) {
        first_dead = cur_obj;
      }

      // move on to the next object
      cur_obj = end;
    }
  }

  assert(cur_obj == scan_limit, "just checking");
  space->_end_of_live = end_of_live;
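  // Record the first dead object; if none was found, fall back to end_of_live
  // so the adjust and compact passes treat everything below _first_dead as live.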
  if (first_dead != NULL) {
    space->_first_dead = first_dead;
  } else {
    space->_first_dead = end_of_live;
  }

  // save the compaction_top of the compaction space.
  cp->space->set_compaction_top(compact_top);
}

template <class SpaceType>
inline void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space) {
  // adjust all the interior pointers to point at the new locations of objects
  // Used by MarkSweep::mark_sweep_phase3()

  HeapWord* cur_obj = space->bottom();
  HeapWord* const end_of_live = space->_end_of_live;  // Established by "scan_and_forward".
  HeapWord* const first_dead = space->_first_dead;    // Established by "scan_and_forward".

  assert(first_dead <= end_of_live, "Stands to reason, no?");

  const intx interval = PrefetchScanIntervalInBytes;

  debug_only(HeapWord* prev_obj = NULL);
  while (cur_obj < end_of_live) {
    Prefetch::write(cur_obj, interval);
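    // Everything below first_dead is known to be live; at or beyond it the mark
    // word has to be consulted.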
    if (cur_obj < first_dead || oop(cur_obj)->is_gc_marked()) {
      // cur_obj is alive
      // point all the oops to the new location
      size_t size = MarkSweep::adjust_pointers(oop(cur_obj));
      size = space->adjust_obj_size(size);
      debug_only(prev_obj = cur_obj);
      cur_obj += size;
    } else {
      debug_only(prev_obj = cur_obj);
      // cur_obj is not a live object; instead its first word holds a pointer to the next live object
      cur_obj = *(HeapWord**)cur_obj;
      assert(cur_obj > prev_obj, "we should be moving forward through memory, cur_obj: " PTR_FORMAT ", prev_obj: " PTR_FORMAT, p2i(cur_obj), p2i(prev_obj));
    }
  }

  assert(cur_obj == end_of_live, "just checking");
}

template <class SpaceType>
inline void CompactibleSpace::scan_and_compact(SpaceType* space) {
  // Copy all live objects to their new location
  // Used by MarkSweep::mark_sweep_phase4()

  HeapWord*       q = space->bottom();
  HeapWord* const t = space->_end_of_live;
  debug_only(HeapWord* prev_q = NULL);

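  // Handle the dense prefix: if the first object did not move, its mark word was
  // reinitialized in an earlier pass and everything up to _first_dead is already
  // in place, so skip straight to the first object that has to be copied.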
  if (q < t && space->_first_dead > q && !oop(q)->is_gc_marked()) {
    #ifdef ASSERT // Debug only
      // we have a chunk of the space which hasn't moved and we've reinitialized
      // the mark word during the previous pass, so we can't use is_gc_marked for
      // the traversal.
      HeapWord* const end = space->_first_dead;

      while (q < end) {
        size_t size = space->obj_size(q);
        assert(!oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
        prev_q = q;
        q += size;
      }
    #endif

    if (space->_first_dead == t) {
      q = t;
    } else {
      // The first word of the first dead object was overwritten by scan_and_forward
      // with the address of the next live object; recover it from the mark word.
      q = (HeapWord*) oop(space->_first_dead)->mark()->decode_pointer();
    }
  }

  const intx scan_interval = PrefetchScanIntervalInBytes;
  const intx copy_interval = PrefetchCopyIntervalInBytes;
  while (q < t) {
    if (!oop(q)->is_gc_marked()) {
      // mark is pointer to next marked oop
      debug_only(prev_q = q);
      q = (HeapWord*) oop(q)->mark()->decode_pointer();
      assert(q > prev_q, "we should be moving forward through memory");
    } else {
      // prefetch beyond q
      Prefetch::read(q, scan_interval);

      // size and destination
      size_t size = space->obj_size(q);
      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();
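      // The forwarding pointer installed during scan_and_forward gives the
      // destination address for this object.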

      // prefetch beyond compaction_top
      Prefetch::write(compaction_top, copy_interval);

      // copy object and reinit its mark
      assert(q != compaction_top, "everything in this pass should be moving");
      Copy::aligned_conjoint_words(q, compaction_top, size);
      oop(compaction_top)->init_mark();
      assert(oop(compaction_top)->klass() != NULL, "should have a class");

      debug_only(prev_q = q);
      q += size;
    }
  }

  // Let's remember if we were empty before we did the compaction.
  bool was_empty = space->used_region().is_empty();
  // Reset space after compaction is complete
  space->reset_after_compaction();
  // We do this clear, below, since it has overloaded meanings for some
  // space subtypes.  For example, OffsetTableContigSpaces that were
  // compacted into will have had their offset table thresholds updated
  // continuously, but those that weren't need to have their thresholds
  // re-initialized.  It also mangles the unused area for debugging.
  if (space->used_region().is_empty()) {
    if (!was_empty) space->clear(SpaceDecorator::Mangle);
  } else {
    if (ZapUnusedHeapArea) space->mangle_unused_area();
  }
}

inline size_t ContiguousSpace::scanned_block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

#endif // SHARE_VM_GC_SHARED_SPACE_INLINE_HPP