/*
 * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_SPACE_INLINE_HPP
#define SHARE_VM_GC_SHARED_SPACE_INLINE_HPP

#include "gc/shared/blockOffsetTable.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/generation.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "memory/universe.hpp"
#include "oops/oopsHierarchy.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/safepoint.hpp"
#if INCLUDE_SERIALGC
#include "gc/serial/markSweep.inline.hpp"
#endif

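// Non-const variant of block_start; delegates to block_start_const.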
inline HeapWord* Space::block_start(const void* p) {
  return block_start_const(p);
}

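// Allocate in the contiguous space and, on success, record the new block
// in the block offset table.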
inline HeapWord* OffsetTableContigSpace::allocate(size_t size) {
  HeapWord* res = ContiguousSpace::allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}

// Because of the requirement that "_offsets" be kept up to date with the
// allocations, we serialize these with a lock.  Therefore, this is best
// used for larger LAB allocations only.
inline HeapWord* OffsetTableContigSpace::par_allocate(size_t size) {
  MutexLocker x(&_par_alloc_lock);
  // This ought to be just "allocate", because of the lock above, but
  // ContiguousSpace::allocate asserts that either the allocating thread
  // holds the heap lock or it is the VM thread and we're at a safepoint.
  // The best I (dld) could figure was to put a field in ContiguousSpace
  // meaning "locking at safepoint taken care of", and set/reset that
  // here.  But this will do for now, especially in light of the comment
  // above.  Perhaps in the future we can devise some lock-free way of
  // keeping this coordination.
  HeapWord* res = ContiguousSpace::par_allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}

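// Find the start of the block containing p by consulting the block
// offset table.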
inline HeapWord*
OffsetTableContigSpace::block_start_const(const void* p) const {
  return _offsets.block_start(p);
}

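// The size of the object (block) starting at addr.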
size_t CompactibleSpace::obj_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

#if INCLUDE_SERIALGC

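// DeadSpacer manages a budget of dead space that may be left in place
// (covered by filler objects) near the bottom of a space during
// compaction, so that mostly-full prefixes need not be moved.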
class DeadSpacer : StackObj {
  size_t _allowed_deadspace_words;
  bool _active;
  CompactibleSpace* _space;

public:
  DeadSpacer(CompactibleSpace* space) : _allowed_deadspace_words(0), _space(space) {
    size_t ratio = _space->allowed_dead_ratio();
    _active = ratio > 0;

    if (_active) {
      assert(!UseG1GC, "G1 should not be using dead space");

      // We allow some amount of garbage towards the bottom of the space, so
      // we don't start compacting before there is a significant gain to be made.
      // Occasionally, we want to ensure a full compaction, which is determined
      // by the MarkSweepAlwaysCompactCount parameter.
      if ((MarkSweep::total_invocations() % MarkSweepAlwaysCompactCount) != 0) {
        _allowed_deadspace_words = (space->capacity() * ratio / 100) / HeapWordSize;
      } else {
        _active = false;
      }
    }
  }

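  // Try to absorb the dead range [dead_start, dead_end) into the remaining
  // budget.  If it fits, overwrite the range with a filler object and mark
  // it so that it is treated as live; otherwise deactivate the spacer.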
  bool insert_deadspace(HeapWord* dead_start, HeapWord* dead_end) {
    if (!_active) {
      return false;
    }

    size_t dead_length = pointer_delta(dead_end, dead_start);
    if (_allowed_deadspace_words >= dead_length) {
      _allowed_deadspace_words -= dead_length;
      CollectedHeap::fill_with_object(dead_start, dead_length);
      oop obj = oop(dead_start);
      obj->set_mark_raw(obj->mark_raw()->set_marked());

      assert(dead_length == (size_t)obj->size(), "bad filler object size");
      log_develop_trace(gc, compaction)("Inserting object to dead space: " PTR_FORMAT ", " PTR_FORMAT ", " SIZE_FORMAT "b",
          p2i(dead_start), p2i(dead_end), dead_length * HeapWordSize);

      return true;
    } else {
      _active = false;
      return false;
    }
  }

};

template <class SpaceType>
inline void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp) {
  // Compute the new addresses for the live objects and store them in the
  // mark word.  Used by MarkSweep::mark_sweep_phase2().

  // We're sure to be here before any objects are compacted into this
  // space, so this is a good time to initialize this:
  space->set_compaction_top(space->bottom());

  if (cp->space == NULL) {
    assert(cp->gen != NULL, "need a generation");
    assert(cp->threshold == NULL, "just checking");
    assert(cp->gen->first_compaction_space() == space, "just checking");
    cp->space = cp->gen->first_compaction_space();
    cp->threshold = cp->space->initialize_threshold();
    cp->space->set_compaction_top(cp->space->bottom());
  }

  HeapWord* compact_top = cp->space->compaction_top(); // This is where we are currently compacting to.

  DeadSpacer dead_spacer(space);

  HeapWord*  end_of_live = space->bottom();  // One byte beyond the last byte of the last live object.
  HeapWord*  first_dead = NULL; // The first dead object.

  const intx interval = PrefetchScanIntervalInBytes;

  HeapWord* cur_obj = space->bottom();
  HeapWord* scan_limit = space->scan_limit();

  while (cur_obj < scan_limit) {
    assert(!space->scanned_block_is_obj(cur_obj) ||
           oop(cur_obj)->mark_raw()->is_marked() || oop(cur_obj)->mark_raw()->is_unlocked() ||
           oop(cur_obj)->mark_raw()->has_bias_pattern(),
           "these are the only valid states during a mark sweep");
    if (space->scanned_block_is_obj(cur_obj) && oop(cur_obj)->is_gc_marked()) {
      // prefetch beyond cur_obj
      Prefetch::write(cur_obj, interval);
      size_t size = space->scanned_block_size(cur_obj);
      compact_top = cp->space->forward(oop(cur_obj), size, cp, compact_top);
      cur_obj += size;
      end_of_live = cur_obj;
    } else {
      // run over all the contiguous dead objects
      HeapWord* end = cur_obj;
      do {
        // prefetch beyond end
        Prefetch::write(end, interval);
        end += space->scanned_block_size(end);
      } while (end < scan_limit && (!space->scanned_block_is_obj(end) || !oop(end)->is_gc_marked()));

      // see if we might want to pretend this object is alive so that
      // we don't have to compact quite as often.
      if (cur_obj == compact_top && dead_spacer.insert_deadspace(cur_obj, end)) {
        oop obj = oop(cur_obj);
        compact_top = cp->space->forward(obj, obj->size(), cp, compact_top);
        end_of_live = end;
      } else {
        // otherwise, it really is a free region.

        // cur_obj is a pointer to a dead object. Use this dead memory to store a pointer to the next live object.
        *(HeapWord**)cur_obj = end;

        // see if this is the first dead region.
        if (first_dead == NULL) {
          first_dead = cur_obj;
        }
      }

      // move on to the next object
      cur_obj = end;
    }
  }

  assert(cur_obj == scan_limit, "just checking");
  space->_end_of_live = end_of_live;
  if (first_dead != NULL) {
    space->_first_dead = first_dead;
  } else {
    space->_first_dead = end_of_live;
  }

  // save the compaction_top of the compaction space.
  cp->space->set_compaction_top(compact_top);
}

template <class SpaceType>
inline void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space) {
  // adjust all the interior pointers to point at the new locations of objects
  // Used by MarkSweep::mark_sweep_phase3()

  HeapWord* cur_obj = space->bottom();
  HeapWord* const end_of_live = space->_end_of_live;  // Established by "scan_and_forward".
  HeapWord* const first_dead = space->_first_dead;    // Established by "scan_and_forward".

  assert(first_dead <= end_of_live, "Stands to reason, no?");

  const intx interval = PrefetchScanIntervalInBytes;

  debug_only(HeapWord* prev_obj = NULL);
  while (cur_obj < end_of_live) {
    Prefetch::write(cur_obj, interval);
    if (cur_obj < first_dead || oop(cur_obj)->is_gc_marked()) {
      // cur_obj is alive
      // point all the oops to the new location
      size_t size = MarkSweep::adjust_pointers(oop(cur_obj));
      size = space->adjust_obj_size(size);
      debug_only(prev_obj = cur_obj);
      cur_obj += size;
    } else {
      debug_only(prev_obj = cur_obj);
      // cur_obj is not a live object, instead it points at the next live object
      cur_obj = *(HeapWord**)cur_obj;
      assert(cur_obj > prev_obj, "we should be moving forward through memory, cur_obj: " PTR_FORMAT ", prev_obj: " PTR_FORMAT, p2i(cur_obj), p2i(prev_obj));
    }
  }

  assert(cur_obj == end_of_live, "just checking");
}

#ifdef ASSERT
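// Debug-only check that objects in the prefix below _first_dead, which
// scan_and_compact will leave in place, are unmarked.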
template <class SpaceType>
inline void CompactibleSpace::verify_up_to_first_dead(SpaceType* space) {
  HeapWord* cur_obj = space->bottom();

  if (cur_obj < space->_end_of_live && space->_first_dead > cur_obj && !oop(cur_obj)->is_gc_marked()) {
     // we have a chunk of the space which hasn't moved and we've reinitialized
     // the mark word during the previous pass, so we can't use is_gc_marked for
     // the traversal.
     HeapWord* prev_obj = NULL;

     while (cur_obj < space->_first_dead) {
       size_t size = space->obj_size(cur_obj);
       assert(!oop(cur_obj)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
       prev_obj = cur_obj;
       cur_obj += size;
     }
  }
}
#endif

template <class SpaceType>
inline void CompactibleSpace::clear_empty_region(SpaceType* space) {
  // Let's remember if we were empty before we did the compaction.
  bool was_empty = space->used_region().is_empty();
  // Reset space after compaction is complete
  space->reset_after_compaction();
  // We do this clear, below, since it has overloaded meanings for some
  // space subtypes.  For example, OffsetTableContigSpace's that were
  // compacted into will have had their offset table thresholds updated
  // continuously, but those that weren't need to have their thresholds
  // re-initialized.  Also mangles unused area for debugging.
  if (space->used_region().is_empty()) {
    if (!was_empty) space->clear(SpaceDecorator::Mangle);
  } else {
    if (ZapUnusedHeapArea) space->mangle_unused_area();
  }
}

template <class SpaceType>
inline void CompactibleSpace::scan_and_compact(SpaceType* space) {
  // Copy all live objects to their new location
  // Used by MarkSweep::mark_sweep_phase4()

  verify_up_to_first_dead(space);

  HeapWord* const bottom = space->bottom();
  HeapWord* const end_of_live = space->_end_of_live;

  assert(space->_first_dead <= end_of_live, "Invariant. _first_dead: " PTR_FORMAT " <= end_of_live: " PTR_FORMAT, p2i(space->_first_dead), p2i(end_of_live));
  if (space->_first_dead == end_of_live && (bottom == end_of_live || !oop(bottom)->is_gc_marked())) {
    // Nothing to compact. The space is either empty or all live objects should be left in place.
    clear_empty_region(space);
    return;
  }

  const intx scan_interval = PrefetchScanIntervalInBytes;
  const intx copy_interval = PrefetchCopyIntervalInBytes;

  assert(bottom < end_of_live, "bottom: " PTR_FORMAT " should be < end_of_live: " PTR_FORMAT, p2i(bottom), p2i(end_of_live));
  HeapWord* cur_obj = bottom;
  if (space->_first_dead > cur_obj && !oop(cur_obj)->is_gc_marked()) {
    // All objects before _first_dead can be skipped. They should not be moved.
    // A pointer to the first live object is stored at the memory location for _first_dead.
    cur_obj = *(HeapWord**)(space->_first_dead);
  }

  debug_only(HeapWord* prev_obj = NULL);
  while (cur_obj < end_of_live) {
    if (!oop(cur_obj)->is_gc_marked()) {
      debug_only(prev_obj = cur_obj);
      // The first word of the dead object contains a pointer to the next live object or end of space.
      cur_obj = *(HeapWord**)cur_obj;
      assert(cur_obj > prev_obj, "we should be moving forward through memory");
    } else {
      // prefetch beyond cur_obj
      Prefetch::read(cur_obj, scan_interval);

      // size and destination
      size_t size = space->obj_size(cur_obj);
      HeapWord* compaction_top = (HeapWord*)oop(cur_obj)->forwardee();

      // prefetch beyond compaction_top
      Prefetch::write(compaction_top, copy_interval);

      // copy object and reinit its mark
      assert(cur_obj != compaction_top, "everything in this pass should be moving");
      Copy::aligned_conjoint_words(cur_obj, compaction_top, size);
      oop(compaction_top)->init_mark_raw();
      assert(oop(compaction_top)->klass() != NULL, "should have a klass");

      debug_only(prev_obj = cur_obj);
      cur_obj += size;
    }
  }

  clear_empty_region(space);
}

#endif // INCLUDE_SERIALGC

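// In a ContiguousSpace every scanned block is an object, so the scanned
// block size is simply the object size.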
size_t ContiguousSpace::scanned_block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

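// Apply the closure to the oops in all objects allocated since the last
// call to save_marks().  The outer loop re-reads top() because applying
// the closure may allocate further objects in this space.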
template <typename OopClosureType>
void ContiguousSpace::oop_since_save_marks_iterate(OopClosureType* blk) {
  HeapWord* t;
  HeapWord* p = saved_mark_word();
  assert(p != NULL, "expected saved mark");

  const intx interval = PrefetchScanIntervalInBytes;
  do {
    t = top();
    while (p < t) {
      Prefetch::write(p, interval);
      debug_only(HeapWord* prev = p);
      oop m = oop(p);
      p += m->oop_iterate_size(blk);
    }
  } while (t < top());

  set_saved_mark_word(p);
}

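// Apply the closure to all oops in the objects within mr.  The region must
// start at an object boundary and contain only parseable objects.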
template <typename OopClosureType>
void ContiguousSpace::par_oop_iterate(MemRegion mr, OopClosureType* blk) {
  HeapWord* obj_addr = mr.start();
  HeapWord* limit = mr.end();
  while (obj_addr < limit) {
    assert(oopDesc::is_oop(oop(obj_addr)), "Should be an oop");
    obj_addr += oop(obj_addr)->oop_iterate_size(blk);
  }
}

#endif // SHARE_VM_GC_SHARED_SPACE_INLINE_HPP