/*
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_SPACE_INLINE_HPP
#define SHARE_VM_GC_SHARED_SPACE_INLINE_HPP

#include "gc/serial/markSweep.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/generation.hpp"
#include "gc/shared/liveRange.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "memory/universe.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/safepoint.hpp"

// Non-const entry point simply delegates to the const implementation.
inline HeapWord* Space::block_start(const void* p) {
  return block_start_const(p);
}

// Allocates from the underlying ContiguousSpace and, on success, records
// the new block in the block-offset table so block_start_const() stays
// consistent with the allocation.  Returns NULL if allocation fails.
inline HeapWord* OffsetTableContigSpace::allocate(size_t size) {
  HeapWord* res = ContiguousSpace::allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}

// Because of the requirement of keeping "_offsets" up to date with the
// allocations, we sequentialize these with a lock.  Therefore, best if
// this is used for larger LAB allocations only.
inline HeapWord* OffsetTableContigSpace::par_allocate(size_t size) {
  // Lock serializes the CAS-based allocation with the offset-table update;
  // see the comment above for why a coarser lock is acceptable here.
  MutexLocker x(&_par_alloc_lock);
  // This ought to be just "allocate", because of the lock above, but that
  // ContiguousSpace::allocate asserts that either the allocating thread
  // holds the heap lock or it is the VM thread and we're at a safepoint.
  // The best I (dld) could figure was to put a field in ContiguousSpace
  // meaning "locking at safepoint taken care of", and set/reset that
  // here.  But this will do for now, especially in light of the comment
  // above.  Perhaps in the future some lock-free manner of keeping the
  // coordination.
  HeapWord* res = ContiguousSpace::par_allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}

// Answers the start of the block containing p using the block-offset table.
inline HeapWord*
OffsetTableContigSpace::block_start_const(const void* p) const {
  return _offsets.block_start(p);
}

template <class SpaceType>
inline void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp) {
  // Compute the new addresses for the live objects and store it in the mark
  // Used by universe::mark_sweep_phase2()
  HeapWord* compact_top; // This is where we are currently compacting to.

  // We're sure to be here before any objects are compacted into this
  // space, so this is a good time to initialize this:
  space->set_compaction_top(space->bottom());

  if (cp->space == NULL) {
    // First space of the compaction: start at the generation's first
    // compaction space and initialize its threshold.
    assert(cp->gen != NULL, "need a generation");
    assert(cp->threshold == NULL, "just checking");
    assert(cp->gen->first_compaction_space() == space, "just checking");
    cp->space = cp->gen->first_compaction_space();
    compact_top = cp->space->bottom();
    cp->space->set_compaction_top(compact_top);
    cp->threshold = cp->space->initialize_threshold();
  } else {
    // Continue compacting into the space a previous call left off at.
    compact_top = cp->space->compaction_top();
  }

  // We allow some amount of garbage towards the bottom of the space, so
  // we don't start compacting before there is a significant gain to be made.
  // Occasionally, we want to ensure a full compaction, which is determined
  // by the MarkSweepAlwaysCompactCount parameter.
  uint invocations = MarkSweep::total_invocations();
  bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0);

  size_t allowed_deadspace = 0;
  if (skip_dead) {
    // Budget of dead words we may pretend are live, as a percentage of
    // the space's capacity.
    const size_t ratio = space->allowed_dead_ratio();
    allowed_deadspace = (space->capacity() * ratio / 100) / HeapWordSize;
  }

  HeapWord* q = space->bottom();
  HeapWord* t = space->scan_limit();

  HeapWord* end_of_live= q;            // One byte beyond the last byte of the last
                                       // live object.
  HeapWord* first_dead = space->end(); // The first dead object.
  LiveRange* liveRange = NULL;         // The current live range, recorded in the
                                       // first header of preceding free area.
  space->_first_dead = first_dead;

  const intx interval = PrefetchScanIntervalInBytes;

  // Walk the space from bottom to scan limit, forwarding live objects and
  // recording LiveRanges over dead gaps.
  while (q < t) {
    // NOTE(review): the duplicated checks via oopDesc::bs()->resolve_oop()
    // appear to accept a mark read through the barrier set's resolved copy
    // of the object as well as the direct one — confirm against the barrier
    // set's resolve_oop contract.
    assert(!space->scanned_block_is_obj(q) ||
           space->make_oop(q)->mark()->is_marked() ||
           oopDesc::bs()->resolve_oop(space->make_oop(q))->mark()->is_marked() ||
           space->make_oop(q)->mark()->is_unlocked() ||
           oopDesc::bs()->resolve_oop(space->make_oop(q))->mark()->is_unlocked() ||
           space->make_oop(q)->mark()->has_bias_pattern() ||
           oopDesc::bs()->resolve_oop(space->make_oop(q))->mark()->has_bias_pattern(),
           "these are the only valid states during a mark sweep");
    if (space->scanned_block_is_obj(q) && space->make_oop(q)->is_gc_marked()) {
      // prefetch beyond q
      Prefetch::write(q, interval);
      size_t size = space->scanned_block_size(q);
      // Install the forwarding pointer and advance the compaction point.
      compact_top = cp->space->forward(space->make_oop(q), size, cp, compact_top);
      q += size;
      end_of_live = q;
    } else {
      // run over all the contiguous dead objects
      HeapWord* end = q;
      do {
        // prefetch beyond end
        Prefetch::write(end, interval);
        end += space->scanned_block_size(end);
      } while (end < t && (!space->scanned_block_is_obj(end) || !space->make_oop(end)->is_gc_marked()));

      // see if we might want to pretend this object is alive so that
      // we don't have to compact quite as often.
      if (allowed_deadspace > 0 && q == compact_top) {
        size_t sz = pointer_delta(end, q);
        if (space->insert_deadspace(allowed_deadspace, q, sz)) {
          // Dead run was turned into a filler object; forward it in place.
          compact_top = cp->space->forward(space->make_oop(q), sz, cp, compact_top);
          q = end;
          end_of_live = end;
          continue;
        }
      }

      // otherwise, it really is a free region.

      // for the previous LiveRange, record the end of the live objects.
      if (liveRange) {
        liveRange->set_end(q);
      }

      // record the current LiveRange object.
      // liveRange->start() is overlaid on the mark word.
      // The dead object's header is reused as a LiveRange pointing at the
      // next live object; later phases follow it to skip the dead run.
      liveRange = (LiveRange*) (HeapWord*) space->make_oop(q);
      liveRange->set_start(end);
      liveRange->set_end(end);

      // see if this is the first dead region.
      if (q < first_dead) {
        first_dead = q;
      }

      // move on to the next object
      q = end;
    }
  }

  assert(q == t, "just checking");
  if (liveRange != NULL) {
    liveRange->set_end(q);
  }
  // Publish the scan results on the space for the adjust/compact phases.
  space->_end_of_live = end_of_live;
  if (end_of_live < first_dead) {
    first_dead = end_of_live;
  }
  space->_first_dead = first_dead;

  // save the compaction_top of the compaction space.
  cp->space->set_compaction_top(compact_top);
}

template <class SpaceType>
inline void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space) {
  // adjust all the interior pointers to point at the new locations of objects
  // Used by MarkSweep::mark_sweep_phase3()

  HeapWord* q = space->bottom();
  HeapWord* t = space->_end_of_live;  // Established by "prepare_for_compaction".

  assert(space->_first_dead <= space->_end_of_live, "Stands to reason, no?");

  if (q < t && space->_first_dead > q && !space->make_oop(q)->is_gc_marked()) {
    // we have a chunk of the space which hasn't moved and we've
    // reinitialized the mark word during the previous pass, so we can't
    // use is_gc_marked for the traversal.
    HeapWord* end = space->_first_dead;

    while (q < end) {
      // I originally tried to conjoin "block_start(q) == q" to the
      // assertion below, but that doesn't work, because you can't
      // accurately traverse previous objects to get to the current one
      // after their pointers have been
      // updated, until the actual compaction is done.
      // dld, 4/00
      assert(space->block_is_obj(q), "should be at block boundaries, and should be looking at objs");

      // point all the oops to the new location
      size_t size = MarkSweep::adjust_pointers(space->make_oop(q));
      size = space->adjust_obj_size(size);

      q += size;
    }

    if (space->_first_dead == t) {
      q = t;
    } else {
      // $$$ This is funky.  Using this to read the previously written
      // LiveRange.  See also use below.
      q = (HeapWord*)oop(space->_first_dead)->mark()->decode_pointer();
    }
  }

  const intx interval = PrefetchScanIntervalInBytes;

  // Main adjust loop: live objects get their interior oops updated; dead
  // objects carry (in their mark word) a pointer to the next live object.
  debug_only(HeapWord* prev_q = NULL);
  while (q < t) {
    // prefetch beyond q
    Prefetch::write(q, interval);
    if (space->make_oop(q)->is_gc_marked()) {
      // q is alive
      // point all the oops to the new location
      size_t size = MarkSweep::adjust_pointers(space->make_oop(q));
      size = space->adjust_obj_size(size);
      debug_only(prev_q = q);
      q += size;
    } else {
      // q is not a live object, so its mark should point at the next
      // live object
      debug_only(prev_q = q);
      q = (HeapWord*) space->make_oop(q)->mark()->decode_pointer();
      assert(q > prev_q, "we should be moving forward through memory");
    }
  }

  assert(q == t, "just checking");
}

template <class SpaceType>
inline void CompactibleSpace::scan_and_compact(SpaceType* space) {
  // Copy all live objects to their new location
  // Used by MarkSweep::mark_sweep_phase4()

  HeapWord* q = space->bottom();
  HeapWord* const t = space->_end_of_live;
  debug_only(HeapWord* prev_q = NULL);

  if (q < t && space->_first_dead > q && !space->make_oop(q)->is_gc_marked()) {
#ifdef ASSERT // Debug only
    // we have a chunk of the space which hasn't moved and we've reinitialized
    // the mark word during the previous pass, so we can't use is_gc_marked for
    // the traversal.
    HeapWord* const end = space->_first_dead;

    // Debug-only walk of the dense prefix to verify nothing there is marked.
    while (q < end) {
      size_t size = space->obj_size(q);
      assert(!space->make_oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
      prev_q = q;
      q += size;
    }
#endif

    if (space->_first_dead == t) {
      q = t;
    } else {
      // $$$ Funky
      // Read the LiveRange overlaid on the first dead object's mark word to
      // skip directly past the dense prefix.
      q = (HeapWord*) oop(space->_first_dead)->mark()->decode_pointer();
    }
  }

  const intx scan_interval = PrefetchScanIntervalInBytes;
  const intx copy_interval = PrefetchCopyIntervalInBytes;
  // Main compaction loop: slide each live object to its forwarding address;
  // dead runs are skipped via the pointer stored in their mark word.
  while (q < t) {
    if (!space->make_oop(q)->is_gc_marked()) {
      // mark is pointer to next marked oop
      debug_only(prev_q = q);
      q = (HeapWord*) space->make_oop(q)->mark()->decode_pointer();
      assert(q > prev_q, "we should be moving forward through memory");
    } else {
      // prefetch beyond q
      Prefetch::read(q, scan_interval);

      // size and destination
      size_t size = space->obj_size(q);
      HeapWord* compaction_top = (HeapWord*)space->make_oop(q)->forwardee();

      // prefetch beyond compaction_top
      Prefetch::write(compaction_top, copy_interval);

      // copy object and reinit its mark
      assert(q != compaction_top, "everything in this pass should be moving");
      Copy::aligned_conjoint_words((HeapWord*) space->make_oop(q), compaction_top, size);
      oop(compaction_top)->init_mark();
      assert(oop(compaction_top)->klass() != NULL, "should have a class");

      debug_only(prev_q = q);
      q += size;
    }
  }

  // Let's remember if we were empty before we did the compaction.
  bool was_empty = space->used_region().is_empty();
  // Reset space after compaction is complete
  space->reset_after_compaction();
  // We do this clear, below, since it has overloaded meanings for some
  // space subtypes.
  // For example, OffsetTableContigSpace's that were
  // compacted into will have had their offset table thresholds updated
  // continuously, but those that weren't need to have their thresholds
  // re-initialized.  Also mangles unused area for debugging.
  if (space->used_region().is_empty()) {
    // Only clear if the space just became empty; an already-empty space
    // needs no re-clearing.
    if (!was_empty) space->clear(SpaceDecorator::Mangle);
  } else {
    if (ZapUnusedHeapArea) space->mangle_unused_area();
  }
}

#endif // SHARE_VM_GC_SHARED_SPACE_INLINE_HPP