/*
 * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_SPACE_INLINE_HPP
#define SHARE_VM_GC_SHARED_SPACE_INLINE_HPP

#include "gc/serial/markSweep.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/generation.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "memory/universe.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/safepoint.hpp"

inline HeapWord* Space::block_start(const void* p) {
  return block_start_const(p);
}

inline HeapWord* OffsetTableContigSpace::allocate(size_t size) {
  HeapWord* res = ContiguousSpace::allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}

// Because of the requirement of keeping "_offsets" up to date with the
// allocations, we sequentialize these with a lock.  Therefore, it is best
// if this is used for larger LAB allocations only.
inline HeapWord* OffsetTableContigSpace::par_allocate(size_t size) {
  MutexLocker x(&_par_alloc_lock);
  // This ought to be just "allocate", because of the lock above, but
  // ContiguousSpace::allocate asserts that either the allocating thread
  // holds the heap lock or it is the VM thread and we're at a safepoint.
  // The best I (dld) could figure was to put a field in ContiguousSpace
  // meaning "locking at safepoint taken care of", and set/reset that
  // here.  But this will do for now, especially in light of the comment
  // above.  Perhaps in the future some lock-free manner of keeping the
  // coordination could be devised.
  HeapWord* res = ContiguousSpace::par_allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}

inline HeapWord*
OffsetTableContigSpace::block_start_const(const void* p) const {
  return _offsets.block_start(p);
}

inline size_t CompactibleSpace::obj_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

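// The scan_and_* templates below implement the per-space work of a sliding
// mark-compact collection.  scan_and_forward (phase 2) computes a forwarding
// address for every live object and installs it in the object's mark word
// (objects that do not move get a freshly initialized mark word instead,
// which is why the later phases treat the dense prefix specially),
// scan_and_adjust_pointers (phase 3) rewrites interior oops to refer to the
// forwarding addresses, and scan_and_compact (phase 4) slides the live
// objects down to their destinations.  Runs of dead objects are threaded
// together: the first word of each dead run holds a pointer to the next
// live object, so the later phases can hop over dead memory in one step.
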
template <class SpaceType>
inline void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp) {
  // Compute the new addresses for the live objects and store them in the
  // mark word of each object.  Used by MarkSweep::mark_sweep_phase2().
  HeapWord* compact_top; // This is where we are currently compacting to.

  // We're sure to be here before any objects are compacted into this
  // space, so this is a good time to initialize this:
  space->set_compaction_top(space->bottom());

  if (cp->space == NULL) {
    assert(cp->gen != NULL, "need a generation");
    assert(cp->threshold == NULL, "just checking");
    assert(cp->gen->first_compaction_space() == space, "just checking");
    cp->space = cp->gen->first_compaction_space();
    compact_top = cp->space->bottom();
    cp->space->set_compaction_top(compact_top);
    cp->threshold = cp->space->initialize_threshold();
  } else {
    compact_top = cp->space->compaction_top();
  }

  // We allow some amount of garbage towards the bottom of the space, so
  // we don't start compacting before there is a significant gain to be made.
  // Occasionally, we want to ensure a full compaction, which is determined
  // by the MarkSweepAlwaysCompactCount parameter.
  uint invocations = MarkSweep::total_invocations();
  bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0);

  size_t allowed_deadspace = 0;
  if (skip_dead) {
    const size_t ratio = space->allowed_dead_ratio();
    allowed_deadspace = (space->capacity() * ratio / 100) / HeapWordSize;
  }

  HeapWord* q = space->bottom();
  HeapWord* t = space->scan_limit();

  HeapWord* end_of_live = q;            // One byte beyond the last byte of the last
                                        // live object.
  HeapWord* first_dead = space->end();  // The first dead object.

  const intx interval = PrefetchScanIntervalInBytes;

  while (q < t) {
    assert(!space->scanned_block_is_obj(q) ||
           oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() ||
           oop(q)->mark()->has_bias_pattern(),
           "these are the only valid states during a mark sweep");
    if (space->scanned_block_is_obj(q) && oop(q)->is_gc_marked()) {
      // prefetch beyond q
      Prefetch::write(q, interval);
      size_t size = space->scanned_block_size(q);
      compact_top = cp->space->forward(oop(q), size, cp, compact_top);
      q += size;
      end_of_live = q;
    } else {
      // run over all the contiguous dead objects
      HeapWord* end = q;
      do {
        // prefetch beyond end
        Prefetch::write(end, interval);
        end += space->scanned_block_size(end);
      } while (end < t && (!space->scanned_block_is_obj(end) || !oop(end)->is_gc_marked()));

      // see if we might want to pretend this object is alive so that
      // we don't have to compact quite as often.
      if (allowed_deadspace > 0 && q == compact_top) {
        size_t sz = pointer_delta(end, q);
        if (space->insert_deadspace(allowed_deadspace, q, sz)) {
          compact_top = cp->space->forward(oop(q), sz, cp, compact_top);
          q = end;
          end_of_live = end;
          continue;
        }
      }

      // otherwise, it really is a free region.

      // q is a pointer to a dead object.  Use this dead memory to store a
      // pointer to the next live object.
      (*(HeapWord**)q) = end;

      // see if this is the first dead region.
      if (q < first_dead) {
        first_dead = q;
      }

      // move on to the next object
      q = end;
    }
  }

  assert(q == t, "just checking");
  space->_end_of_live = end_of_live;
  if (end_of_live < first_dead) {
    first_dead = end_of_live;
  }
  space->_first_dead = first_dead;

  // save the compaction_top of the compaction space.
  cp->space->set_compaction_top(compact_top);
}

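// A sketch of the state scan_and_forward leaves behind (L = live run,
// D = dead run), assuming some garbage survives below _end_of_live:
//
//   bottom                               _end_of_live        scan_limit
//   |  L0  |  D0  |  L1  |  D1  |  L2  |  D2 (trailing dead)  |
//          ^
//      _first_dead
//
// The first word of D0 points at L1 and the first word of D1 points at L2,
// so the later phases can hop from one live run to the next.  Traversal in
// those phases stops at _end_of_live, so the trailing dead run D2 is never
// visited again.
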
template <class SpaceType>
inline void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space) {
  // adjust all the interior pointers to point at the new locations of objects
  // Used by MarkSweep::mark_sweep_phase3()

  HeapWord* q = space->bottom();
  HeapWord* t = space->_end_of_live;  // Established by "prepare_for_compaction".

  assert(space->_first_dead <= space->_end_of_live, "Stands to reason, no?");

  if (q < t && space->_first_dead > q && !oop(q)->is_gc_marked()) {
    // we have a chunk of the space which hasn't moved and we've
    // reinitialized the mark word during the previous pass, so we can't
    // use is_gc_marked for the traversal.
    HeapWord* end = space->_first_dead;

    while (q < end) {
      // I originally tried to conjoin "block_start(q) == q" to the
      // assertion below, but that doesn't work, because you can't
      // accurately traverse previous objects to get to the current one
      // after their pointers have been updated, until the actual
      // compaction is done.  dld, 4/00
      assert(space->block_is_obj(q), "should be at block boundaries, and should be looking at objs");

      // point all the oops to the new location
      size_t size = MarkSweep::adjust_pointers(oop(q));
      size = space->adjust_obj_size(size);

      q += size;
    }

    if (space->_first_dead == t) {
      q = t;
    } else {
      // The first dead object is no longer an object.  At that memory address,
      // there is a pointer to the first live object that the previous phase found.
      q = *((HeapWord**)(space->_first_dead));
    }
  }

  const intx interval = PrefetchScanIntervalInBytes;

  debug_only(HeapWord* prev_q = NULL);
  while (q < t) {
    // prefetch beyond q
    Prefetch::write(q, interval);
    if (oop(q)->is_gc_marked()) {
      // q is alive
      // point all the oops to the new location
      size_t size = MarkSweep::adjust_pointers(oop(q));
      size = space->adjust_obj_size(size);
      debug_only(prev_q = q);
      q += size;
    } else {
      debug_only(prev_q = q);
      // q is not a live object, instead it points at the next live object
      q = *(HeapWord**)q;
      assert(q > prev_q, "we should be moving forward through memory, q: " PTR_FORMAT ", prev_q: " PTR_FORMAT, p2i(q), p2i(prev_q));
    }
  }

  assert(q == t, "just checking");
}

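// By the time scan_and_compact runs, every live object outside the dense
// prefix carries its forwarding pointer in its mark word (installed by
// scan_and_forward) and all interior oops already refer to the forwarding
// addresses (rewritten by scan_and_adjust_pointers), so each object can be
// moved with a single Copy::aligned_conjoint_words() call and given a fresh
// mark word afterwards.
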
template <class SpaceType>
inline void CompactibleSpace::scan_and_compact(SpaceType* space) {
  // Copy all live objects to their new location
  // Used by MarkSweep::mark_sweep_phase4()

  HeapWord*       q = space->bottom();
  HeapWord* const t = space->_end_of_live;
  debug_only(HeapWord* prev_q = NULL);

  if (q < t && space->_first_dead > q && !oop(q)->is_gc_marked()) {
#ifdef ASSERT // Debug only
    // we have a chunk of the space which hasn't moved and we've reinitialized
    // the mark word during the previous pass, so we can't use is_gc_marked for
    // the traversal.
    HeapWord* const end = space->_first_dead;

    while (q < end) {
      size_t size = space->obj_size(q);
      assert(!oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
      prev_q = q;
      q += size;
    }
#endif

    if (space->_first_dead == t) {
      q = t;
    } else {
      // The first dead object's first word holds a pointer to the first live
      // object beyond the dead region (stored there by scan_and_forward);
      // read it back through the mark word.
      q = (HeapWord*) oop(space->_first_dead)->mark()->decode_pointer();
    }
  }

  const intx scan_interval = PrefetchScanIntervalInBytes;
  const intx copy_interval = PrefetchCopyIntervalInBytes;
  while (q < t) {
    if (!oop(q)->is_gc_marked()) {
      // mark is pointer to next marked oop
      debug_only(prev_q = q);
      q = (HeapWord*) oop(q)->mark()->decode_pointer();
      assert(q > prev_q, "we should be moving forward through memory");
    } else {
      // prefetch beyond q
      Prefetch::read(q, scan_interval);

      // size and destination
      size_t size = space->obj_size(q);
      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();

      // prefetch beyond compaction_top
      Prefetch::write(compaction_top, copy_interval);

      // copy object and reinit its mark
      assert(q != compaction_top, "everything in this pass should be moving");
      Copy::aligned_conjoint_words(q, compaction_top, size);
      oop(compaction_top)->init_mark();
      assert(oop(compaction_top)->klass() != NULL, "should have a class");

      debug_only(prev_q = q);
      q += size;
    }
  }

  // Let's remember if we were empty before we did the compaction.
  bool was_empty = space->used_region().is_empty();
  // Reset space after compaction is complete
  space->reset_after_compaction();
  // We do this clear, below, since it has overloaded meanings for some
  // space subtypes.  For example, OffsetTableContigSpaces that were
  // compacted into will have had their offset table thresholds updated
  // continuously, but those that weren't need to have their thresholds
  // re-initialized.  Also mangles unused area for debugging.
  if (space->used_region().is_empty()) {
    if (!was_empty) space->clear(SpaceDecorator::Mangle);
  } else {
    if (ZapUnusedHeapArea) space->mangle_unused_area();
  }
}

inline size_t ContiguousSpace::scanned_block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

#endif // SHARE_VM_GC_SHARED_SPACE_INLINE_HPP