/*
 * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_SPACE_INLINE_HPP
#define SHARE_VM_GC_SHARED_SPACE_INLINE_HPP

#include "gc/serial/markSweep.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/generation.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "memory/universe.hpp"
#include "oops/oopsHierarchy.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/safepoint.hpp"

inline HeapWord* Space::block_start(const void* p) {
  return block_start_const(p);
}

inline HeapWord* OffsetTableContigSpace::allocate(size_t size) {
  HeapWord* res = ContiguousSpace::allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}

// Because of the requirement of keeping "_offsets" up to date with the
// allocations, we sequentialize these with a lock. Therefore, it is best if
// this is used for larger LAB allocations only.
inline HeapWord* OffsetTableContigSpace::par_allocate(size_t size) {
  MutexLocker x(&_par_alloc_lock);
  // This ought to be just "allocate", because of the lock above, but
  // ContiguousSpace::allocate asserts that either the allocating thread
  // holds the heap lock or it is the VM thread and we're at a safepoint.
  // The best I (dld) could figure was to put a field in ContiguousSpace
  // meaning "locking at safepoint taken care of", and set/reset that
  // here. But this will do for now, especially in light of the comment
  // above. Perhaps in the future some lock-free manner of keeping the
  // coordination could be devised.
  HeapWord* res = ContiguousSpace::par_allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}
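// Usage sketch (illustrative only, not part of the original header): a
// hypothetical caller carving a LAB out of this space. "lab_word_size" is
// an assumed name, not an identifier from this file.
//
//   HeapWord* lab = space->par_allocate(lab_word_size);
//   if (lab != NULL) {
//     // The offset table already covers [lab, lab + lab_word_size), so
//     // block_start() queries within the new block resolve correctly.
//   }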
inline HeapWord*
OffsetTableContigSpace::block_start_const(const void* p) const {
  return _offsets.block_start(p);
}

size_t CompactibleSpace::obj_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

class DeadSpacer : StackObj {
  size_t _allowed_deadspace_words;
  bool _active;
  CompactibleSpace* _space;

public:
  DeadSpacer(CompactibleSpace* space) : _allowed_deadspace_words(0), _space(space) {
    size_t ratio = _space->allowed_dead_ratio();
    _active = ratio > 0;

    if (_active) {
      assert(!UseG1GC, "G1 should not be using dead space");

      // We allow some amount of garbage towards the bottom of the space, so
      // we don't start compacting before there is a significant gain to be made.
      // Occasionally, we want to ensure a full compaction, which is determined
      // by the MarkSweepAlwaysCompactCount parameter.
      if ((MarkSweep::total_invocations() % MarkSweepAlwaysCompactCount) != 0) {
        _allowed_deadspace_words = (space->capacity() * ratio / 100) / HeapWordSize;
      } else {
        _active = false;
      }
    }
  }

  bool insert_deadspace(HeapWord* dead_start, HeapWord* dead_end) {
    if (!_active) {
      return false;
    }

    size_t dead_length = pointer_delta(dead_end, dead_start);
    if (_allowed_deadspace_words >= dead_length) {
      _allowed_deadspace_words -= dead_length;
      CollectedHeap::fill_with_object(dead_start, dead_length);
      oop obj = oop(dead_start);
      obj->set_mark_raw(obj->mark_raw()->set_marked());

      assert(dead_length == (size_t)obj->size(), "bad filler object size");
      log_develop_trace(gc, compaction)("Inserting object to dead space: " PTR_FORMAT ", " PTR_FORMAT ", " SIZE_FORMAT "b",
                                        p2i(dead_start), p2i(dead_end), dead_length * HeapWordSize);

      return true;
    } else {
      _active = false;
      return false;
    }
  }
};
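// Worked example for the dead-space budget (assumed numbers, for
// illustration only): with a 64 MB space, an allowed_dead_ratio() of 5 and
// 8-byte HeapWords, _allowed_deadspace_words comes out to
// (67108864 * 5 / 100) / 8 = 419430 words, i.e. roughly 3.2 MB of dead
// memory may be overwritten with filler objects and treated as live
// before the budget is exhausted and compaction proceeds normally.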
template <class SpaceType>
inline void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp) {
  // Compute the new addresses for the live objects and store them in the
  // mark word of each object.
  // Used by universe::mark_sweep_phase2()

  // We're sure to be here before any objects are compacted into this
  // space, so this is a good time to initialize this:
  space->set_compaction_top(space->bottom());

  if (cp->space == NULL) {
    assert(cp->gen != NULL, "need a generation");
    assert(cp->threshold == NULL, "just checking");
    assert(cp->gen->first_compaction_space() == space, "just checking");
    cp->space = cp->gen->first_compaction_space();
    cp->threshold = cp->space->initialize_threshold();
    cp->space->set_compaction_top(cp->space->bottom());
  }

  HeapWord* compact_top = cp->space->compaction_top(); // This is where we are currently compacting to.

  DeadSpacer dead_spacer(space);

  HeapWord* end_of_live = space->bottom(); // One byte beyond the last byte of the last live object.
  HeapWord* first_dead = NULL;             // The first dead object.

  const intx interval = PrefetchScanIntervalInBytes;

  HeapWord* cur_obj = space->bottom();
  HeapWord* scan_limit = space->scan_limit();

  while (cur_obj < scan_limit) {
    assert(!space->scanned_block_is_obj(cur_obj) ||
           oop(cur_obj)->mark_raw()->is_marked() || oop(cur_obj)->mark_raw()->is_unlocked() ||
           oop(cur_obj)->mark_raw()->has_bias_pattern(),
           "these are the only valid states during a mark sweep");
    if (space->scanned_block_is_obj(cur_obj) && oop(cur_obj)->is_gc_marked()) {
      // prefetch beyond cur_obj
      Prefetch::write(cur_obj, interval);
      size_t size = space->scanned_block_size(cur_obj);
      compact_top = cp->space->forward(oop(cur_obj), size, cp, compact_top);
      cur_obj += size;
      end_of_live = cur_obj;
    } else {
      // run over all the contiguous dead objects
      HeapWord* end = cur_obj;
      do {
        // prefetch beyond end
        Prefetch::write(end, interval);
        end += space->scanned_block_size(end);
      } while (end < scan_limit && (!space->scanned_block_is_obj(end) || !oop(end)->is_gc_marked()));

      // see if we might want to pretend this object is alive so that
      // we don't have to compact quite as often.
      if (cur_obj == compact_top && dead_spacer.insert_deadspace(cur_obj, end)) {
        oop obj = oop(cur_obj);
        compact_top = cp->space->forward(obj, obj->size(), cp, compact_top);
        end_of_live = end;
      } else {
        // otherwise, it really is a free region.

        // cur_obj is a pointer to a dead object. Use this dead memory to store a pointer to the next live object.
        *(HeapWord**)cur_obj = end;

        // see if this is the first dead region.
        if (first_dead == NULL) {
          first_dead = cur_obj;
        }
      }

      // move on to the next object
      cur_obj = end;
    }
  }

  assert(cur_obj == scan_limit, "just checking");
  space->_end_of_live = end_of_live;
  if (first_dead != NULL) {
    space->_first_dead = first_dead;
  } else {
    space->_first_dead = end_of_live;
  }

  // save the compaction_top of the compaction space.
  cp->space->set_compaction_top(compact_top);
}
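// Resulting layout sketch (illustrative): L = live, D = dead. After
// scan_and_forward, the first word of each run of dead objects holds the
// address of the next live object, so later phases can hop over dead runs
// without re-scanning them:
//
//   bottom                                  end_of_live     scan_limit
//     | L | L |  D  D  D  | L |  D  | L |  trailing dead  |
//               ^
//               first_dead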
template <class SpaceType>
inline void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space) {
  // adjust all the interior pointers to point at the new locations of objects
  // Used by MarkSweep::mark_sweep_phase3()

  HeapWord* cur_obj = space->bottom();
  HeapWord* const end_of_live = space->_end_of_live; // Established by "scan_and_forward".
  HeapWord* const first_dead = space->_first_dead;   // Established by "scan_and_forward".

  assert(first_dead <= end_of_live, "Stands to reason, no?");

  const intx interval = PrefetchScanIntervalInBytes;

  debug_only(HeapWord* prev_obj = NULL);
  while (cur_obj < end_of_live) {
    Prefetch::write(cur_obj, interval);
    if (cur_obj < first_dead || oop(cur_obj)->is_gc_marked()) {
      // cur_obj is alive
      // point all the oops to the new location
      size_t size = MarkSweep::adjust_pointers(oop(cur_obj));
      size = space->adjust_obj_size(size);
      debug_only(prev_obj = cur_obj);
      cur_obj += size;
    } else {
      debug_only(prev_obj = cur_obj);
      // cur_obj is not a live object; instead its first word points at the next live object
      cur_obj = *(HeapWord**)cur_obj;
      assert(cur_obj > prev_obj, "we should be moving forward through memory, cur_obj: " PTR_FORMAT ", prev_obj: " PTR_FORMAT, p2i(cur_obj), p2i(prev_obj));
    }
  }

  assert(cur_obj == end_of_live, "just checking");
}

#ifdef ASSERT
template <class SpaceType>
inline void CompactibleSpace::verify_up_to_first_dead(SpaceType* space) {
  HeapWord* cur_obj = space->bottom();

  if (cur_obj < space->_end_of_live && space->_first_dead > cur_obj && !oop(cur_obj)->is_gc_marked()) {
    // we have a chunk of the space which hasn't moved and we've reinitialized
    // the mark word during the previous pass, so we can't use is_gc_marked for
    // the traversal.
    HeapWord* prev_obj = NULL;

    while (cur_obj < space->_first_dead) {
      size_t size = space->obj_size(cur_obj);
      assert(!oop(cur_obj)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
      prev_obj = cur_obj;
      cur_obj += size;
    }
  }
}
#endif

template <class SpaceType>
inline void CompactibleSpace::clear_empty_region(SpaceType* space) {
  // Let's remember if we were empty before we did the compaction.
  bool was_empty = space->used_region().is_empty();
  // Reset space after compaction is complete
  space->reset_after_compaction();
  // We do this clear, below, since it has overloaded meanings for some
  // space subtypes. For example, OffsetTableContigSpace's that were
  // compacted into will have had their offset table thresholds updated
  // continuously, but those that weren't need to have their thresholds
  // re-initialized. Also mangles unused area for debugging.
  if (space->used_region().is_empty()) {
    if (!was_empty) space->clear(SpaceDecorator::Mangle);
  } else {
    if (ZapUnusedHeapArea) space->mangle_unused_area();
  }
}
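// Taken together, the templates above and below implement the classic
// three-pass sliding compaction: scan_and_forward (phase 2) computes
// forwarding addresses, scan_and_adjust_pointers (phase 3) rewrites
// references to use those addresses, and scan_and_compact (phase 4, below)
// slides the live objects into place and reinitializes their mark words.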
template <class SpaceType>
inline void CompactibleSpace::scan_and_compact(SpaceType* space) {
  // Copy all live objects to their new location
  // Used by MarkSweep::mark_sweep_phase4()

  verify_up_to_first_dead(space);

  HeapWord* const bottom = space->bottom();
  HeapWord* const end_of_live = space->_end_of_live;

  assert(space->_first_dead <= end_of_live, "Invariant. _first_dead: " PTR_FORMAT " <= end_of_live: " PTR_FORMAT, p2i(space->_first_dead), p2i(end_of_live));
  if (space->_first_dead == end_of_live && (bottom == end_of_live || !oop(bottom)->is_gc_marked())) {
    // Nothing to compact. The space is either empty or all live objects should be left in place.
    clear_empty_region(space);
    return;
  }

  const intx scan_interval = PrefetchScanIntervalInBytes;
  const intx copy_interval = PrefetchCopyIntervalInBytes;

  assert(bottom < end_of_live, "bottom: " PTR_FORMAT " should be < end_of_live: " PTR_FORMAT, p2i(bottom), p2i(end_of_live));
  HeapWord* cur_obj = bottom;
  if (space->_first_dead > cur_obj && !oop(cur_obj)->is_gc_marked()) {
    // All objects before _first_dead can be skipped. They should not be moved.
    // A pointer to the first live object is stored at the memory location for _first_dead.
    cur_obj = *(HeapWord**)(space->_first_dead);
  }

  debug_only(HeapWord* prev_obj = NULL);
  while (cur_obj < end_of_live) {
    if (!oop(cur_obj)->is_gc_marked()) {
      debug_only(prev_obj = cur_obj);
      // The first word of the dead object contains a pointer to the next live object or the end of the space.
      cur_obj = *(HeapWord**)cur_obj;
      assert(cur_obj > prev_obj, "we should be moving forward through memory");
    } else {
      // prefetch beyond cur_obj
      Prefetch::read(cur_obj, scan_interval);

      // size and destination
      size_t size = space->obj_size(cur_obj);
      HeapWord* compaction_top = (HeapWord*)oop(cur_obj)->forwardee();

      // prefetch beyond compaction_top
      Prefetch::write(compaction_top, copy_interval);

      // copy object and reinit its mark
      assert(cur_obj != compaction_top, "everything in this pass should be moving");
      Copy::aligned_conjoint_words(cur_obj, compaction_top, size);
      oop(compaction_top)->init_mark_raw();
      assert(oop(compaction_top)->klass() != NULL, "should have a class");

      debug_only(prev_obj = cur_obj);
      cur_obj += size;
    }
  }

  clear_empty_region(space);
}

size_t ContiguousSpace::scanned_block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

#endif // SHARE_VM_GC_SHARED_SPACE_INLINE_HPP