1 /* 2 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_VM_GC_G1_HEAPREGION_INLINE_HPP 26 #define SHARE_VM_GC_G1_HEAPREGION_INLINE_HPP 27 28 #include "gc/g1/g1BlockOffsetTable.inline.hpp" 29 #include "gc/g1/g1CollectedHeap.inline.hpp" 30 #include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp" 31 #include "gc/g1/heapRegion.hpp" 32 #include "gc/shared/space.hpp" 33 #include "oops/oop.inline.hpp" 34 #include "runtime/atomic.hpp" 35 #include "runtime/prefetch.inline.hpp" 36 #include "utilities/align.hpp" 37 38 inline HeapWord* G1ContiguousSpace::allocate_impl(size_t min_word_size, 39 size_t desired_word_size, 40 size_t* actual_size) { 41 HeapWord* obj = top(); 42 size_t available = pointer_delta(end(), obj); 43 size_t want_to_allocate = MIN2(available, desired_word_size); 44 if (want_to_allocate >= min_word_size) { 45 HeapWord* new_top = obj + want_to_allocate; 46 set_top(new_top); 47 assert(is_aligned(obj) && is_aligned(new_top), "checking alignment"); 48 *actual_size = want_to_allocate; 49 return obj; 50 } else { 51 return NULL; 52 } 53 } 54 55 inline HeapWord* G1ContiguousSpace::par_allocate_impl(size_t min_word_size, 56 size_t desired_word_size, 57 size_t* actual_size) { 58 do { 59 HeapWord* obj = top(); 60 size_t available = pointer_delta(end(), obj); 61 size_t want_to_allocate = MIN2(available, desired_word_size); 62 if (want_to_allocate >= min_word_size) { 63 HeapWord* new_top = obj + want_to_allocate; 64 HeapWord* result = Atomic::cmpxchg(new_top, top_addr(), obj); 65 // result can be one of two: 66 // the old top value: the exchange succeeded 67 // otherwise: the new value of the top is returned. 68 if (result == obj) { 69 assert(is_aligned(obj) && is_aligned(new_top), "checking alignment"); 70 *actual_size = want_to_allocate; 71 return obj; 72 } 73 } else { 74 return NULL; 75 } 76 } while (true); 77 } 78 79 inline HeapWord* G1ContiguousSpace::allocate(size_t min_word_size, 80 size_t desired_word_size, 81 size_t* actual_size) { 82 HeapWord* res = allocate_impl(min_word_size, desired_word_size, actual_size); 83 if (res != NULL) { 84 _bot_part.alloc_block(res, *actual_size); 85 } 86 return res; 87 } 88 89 inline HeapWord* G1ContiguousSpace::allocate(size_t word_size) { 90 size_t temp; 91 return allocate(word_size, word_size, &temp); 92 } 93 94 inline HeapWord* G1ContiguousSpace::par_allocate(size_t word_size) { 95 size_t temp; 96 return par_allocate(word_size, word_size, &temp); 97 } 98 99 // Because of the requirement of keeping "_offsets" up to date with the 100 // allocations, we sequentialize these with a lock. Therefore, best if 101 // this is used for larger LAB allocations only. 102 inline HeapWord* G1ContiguousSpace::par_allocate(size_t min_word_size, 103 size_t desired_word_size, 104 size_t* actual_size) { 105 MutexLocker x(&_par_alloc_lock); 106 return allocate(min_word_size, desired_word_size, actual_size); 107 } 108 109 inline HeapWord* G1ContiguousSpace::block_start(const void* p) { 110 return _bot_part.block_start(p); 111 } 112 113 inline HeapWord* 114 G1ContiguousSpace::block_start_const(const void* p) const { 115 return _bot_part.block_start_const(p); 116 } 117 118 inline bool HeapRegion::is_obj_dead_with_size(const oop obj, const G1CMBitMap* const prev_bitmap, size_t* size) const { 119 HeapWord* addr = (HeapWord*) obj; 120 121 assert(addr < top(), "must be"); 122 assert(!is_closed_archive(), 123 "Closed archive regions should not have references into other regions"); 124 assert(!is_humongous(), "Humongous objects not handled here"); 125 bool obj_is_dead = is_obj_dead(obj, prev_bitmap); 126 127 if (ClassUnloadingWithConcurrentMark && obj_is_dead) { 128 assert(!block_is_obj(addr), "must be"); 129 *size = block_size_using_bitmap(addr, prev_bitmap); 130 } else { 131 assert(block_is_obj(addr), "must be"); 132 *size = obj->size(); 133 } 134 return obj_is_dead; 135 } 136 137 inline bool 138 HeapRegion::block_is_obj(const HeapWord* p) const { 139 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 140 141 if (!this->is_in(p)) { 142 assert(is_continues_humongous(), "This case can only happen for humongous regions"); 143 return (p == humongous_start_region()->bottom()); 144 } 145 if (ClassUnloadingWithConcurrentMark) { 146 return !g1h->is_obj_dead(oop(p), this); 147 } 148 return p < top(); 149 } 150 151 inline size_t HeapRegion::block_size_using_bitmap(const HeapWord* addr, const G1CMBitMap* const prev_bitmap) const { 152 assert(ClassUnloadingWithConcurrentMark, 153 "All blocks should be objects if class unloading isn't used, so this method should not be called. " 154 "HR: [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ") " 155 "addr: " PTR_FORMAT, 156 p2i(bottom()), p2i(top()), p2i(end()), p2i(addr)); 157 158 // Old regions' dead objects may have dead classes 159 // We need to find the next live object using the bitmap 160 HeapWord* next = prev_bitmap->get_next_marked_addr(addr, prev_top_at_mark_start()); 161 162 assert(next > addr, "must get the next live object"); 163 return pointer_delta(next, addr); 164 } 165 166 inline bool HeapRegion::is_obj_dead(const oop obj, const G1CMBitMap* const prev_bitmap) const { 167 assert(is_in_reserved(obj), "Object " PTR_FORMAT " must be in region", p2i(obj)); 168 return !obj_allocated_since_prev_marking(obj) && 169 !prev_bitmap->is_marked((HeapWord*)obj) && 170 !is_open_archive(); 171 } 172 173 inline size_t HeapRegion::block_size(const HeapWord *addr) const { 174 if (addr == top()) { 175 return pointer_delta(end(), addr); 176 } 177 178 if (block_is_obj(addr)) { 179 return oop(addr)->size(); 180 } 181 182 return block_size_using_bitmap(addr, G1CollectedHeap::heap()->concurrent_mark()->prev_mark_bitmap()); 183 } 184 185 inline void HeapRegion::complete_compaction() { 186 // Reset space and bot after compaction is complete if needed. 187 reset_after_compaction(); 188 if (used_region().is_empty()) { 189 reset_bot(); 190 } 191 192 // After a compaction the mark bitmap is invalid, so we must 193 // treat all objects as being inside the unmarked area. 194 zero_marked_bytes(); 195 init_top_at_mark_start(); 196 197 // Clear unused heap memory in debug builds. 198 if (ZapUnusedHeapArea) { 199 mangle_unused_area(); 200 } 201 } 202 203 template<typename ApplyToMarkedClosure> 204 inline void HeapRegion::apply_to_marked_objects(G1CMBitMap* bitmap, ApplyToMarkedClosure* closure) { 205 HeapWord* limit = scan_limit(); 206 HeapWord* next_addr = bottom(); 207 208 while (next_addr < limit) { 209 Prefetch::write(next_addr, PrefetchScanIntervalInBytes); 210 // This explicit is_marked check is a way to avoid 211 // some extra work done by get_next_marked_addr for 212 // the case where next_addr is marked. 213 if (bitmap->is_marked(next_addr)) { 214 oop current = oop(next_addr); 215 next_addr += closure->apply(current); 216 } else { 217 next_addr = bitmap->get_next_marked_addr(next_addr, limit); 218 } 219 } 220 221 assert(next_addr == limit, "Should stop the scan at the limit."); 222 } 223 224 inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t min_word_size, 225 size_t desired_word_size, 226 size_t* actual_word_size) { 227 assert(is_young(), "we can only skip BOT updates on young regions"); 228 return par_allocate_impl(min_word_size, desired_word_size, actual_word_size); 229 } 230 231 inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t word_size) { 232 size_t temp; 233 return allocate_no_bot_updates(word_size, word_size, &temp); 234 } 235 236 inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t min_word_size, 237 size_t desired_word_size, 238 size_t* actual_word_size) { 239 assert(is_young(), "we can only skip BOT updates on young regions"); 240 return allocate_impl(min_word_size, desired_word_size, actual_word_size); 241 } 242 243 inline void HeapRegion::note_start_of_marking() { 244 _next_marked_bytes = 0; 245 _next_top_at_mark_start = top(); 246 } 247 248 inline void HeapRegion::note_end_of_marking() { 249 _prev_top_at_mark_start = _next_top_at_mark_start; 250 _next_top_at_mark_start = bottom(); 251 _prev_marked_bytes = _next_marked_bytes; 252 _next_marked_bytes = 0; 253 } 254 255 inline void HeapRegion::note_start_of_copying(bool during_initial_mark) { 256 if (is_survivor()) { 257 // This is how we always allocate survivors. 258 assert(_next_top_at_mark_start == bottom(), "invariant"); 259 } else { 260 if (during_initial_mark) { 261 // During initial-mark we'll explicitly mark any objects on old 262 // regions that are pointed to by roots. Given that explicit 263 // marks only make sense under NTAMS it'd be nice if we could 264 // check that condition if we wanted to. Given that we don't 265 // know where the top of this region will end up, we simply set 266 // NTAMS to the end of the region so all marks will be below 267 // NTAMS. We'll set it to the actual top when we retire this region. 268 _next_top_at_mark_start = end(); 269 } else { 270 // We could have re-used this old region as to-space over a 271 // couple of GCs since the start of the concurrent marking 272 // cycle. This means that [bottom,NTAMS) will contain objects 273 // copied up to and including initial-mark and [NTAMS, top) 274 // will contain objects copied during the concurrent marking cycle. 275 assert(top() >= _next_top_at_mark_start, "invariant"); 276 } 277 } 278 } 279 280 inline void HeapRegion::note_end_of_copying(bool during_initial_mark) { 281 if (is_survivor()) { 282 // This is how we always allocate survivors. 283 assert(_next_top_at_mark_start == bottom(), "invariant"); 284 } else { 285 if (during_initial_mark) { 286 // See the comment for note_start_of_copying() for the details 287 // on this. 288 assert(_next_top_at_mark_start == end(), "pre-condition"); 289 _next_top_at_mark_start = top(); 290 } else { 291 // See the comment for note_start_of_copying() for the details 292 // on this. 293 assert(top() >= _next_top_at_mark_start, "invariant"); 294 } 295 } 296 } 297 298 inline bool HeapRegion::in_collection_set() const { 299 return G1CollectedHeap::heap()->is_in_cset(this); 300 } 301 302 template <class Closure, bool is_gc_active> 303 bool HeapRegion::do_oops_on_card_in_humongous(MemRegion mr, 304 Closure* cl, 305 G1CollectedHeap* g1h) { 306 assert(is_humongous(), "precondition"); 307 HeapRegion* sr = humongous_start_region(); 308 oop obj = oop(sr->bottom()); 309 310 // If concurrent and klass_or_null is NULL, then space has been 311 // allocated but the object has not yet been published by setting 312 // the klass. That can only happen if the card is stale. However, 313 // we've already set the card clean, so we must return failure, 314 // since the allocating thread could have performed a write to the 315 // card that might be missed otherwise. 316 if (!is_gc_active && (obj->klass_or_null_acquire() == NULL)) { 317 return false; 318 } 319 320 // We have a well-formed humongous object at the start of sr. 321 // Only filler objects follow a humongous object in the containing 322 // regions, and we can ignore those. So only process the one 323 // humongous object. 324 if (!g1h->is_obj_dead(obj, sr)) { 325 if (obj->is_objArray() || (sr->bottom() < mr.start())) { 326 // objArrays are always marked precisely, so limit processing 327 // with mr. Non-objArrays might be precisely marked, and since 328 // it's humongous it's worthwhile avoiding full processing. 329 // However, the card could be stale and only cover filler 330 // objects. That should be rare, so not worth checking for; 331 // instead let it fall out from the bounded iteration. 332 obj->oop_iterate(cl, mr); 333 } else { 334 // If obj is not an objArray and mr contains the start of the 335 // obj, then this could be an imprecise mark, and we need to 336 // process the entire object. 337 obj->oop_iterate(cl); 338 } 339 } 340 return true; 341 } 342 343 template <bool is_gc_active, class Closure> 344 bool HeapRegion::oops_on_card_seq_iterate_careful(MemRegion mr, 345 Closure* cl) { 346 assert(MemRegion(bottom(), end()).contains(mr), "Card region not in heap region"); 347 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 348 349 // Special handling for humongous regions. 350 if (is_humongous()) { 351 return do_oops_on_card_in_humongous<Closure, is_gc_active>(mr, cl, g1h); 352 } 353 assert(is_old() || is_archive(), "Wrongly trying to iterate over region %u type %s", _hrm_index, get_type_str()); 354 355 // Because mr has been trimmed to what's been allocated in this 356 // region, the parts of the heap that are examined here are always 357 // parsable; there's no need to use klass_or_null to detect 358 // in-progress allocation. 359 360 // Cache the boundaries of the memory region in some const locals 361 HeapWord* const start = mr.start(); 362 HeapWord* const end = mr.end(); 363 364 // Find the obj that extends onto mr.start(). 365 // Update BOT as needed while finding start of (possibly dead) 366 // object containing the start of the region. 367 HeapWord* cur = block_start(start); 368 369 #ifdef ASSERT 370 { 371 assert(cur <= start, 372 "cur: " PTR_FORMAT ", start: " PTR_FORMAT, p2i(cur), p2i(start)); 373 HeapWord* next = cur + block_size(cur); 374 assert(start < next, 375 "start: " PTR_FORMAT ", next: " PTR_FORMAT, p2i(start), p2i(next)); 376 } 377 #endif 378 379 const G1CMBitMap* const bitmap = g1h->concurrent_mark()->prev_mark_bitmap(); 380 do { 381 oop obj = oop(cur); 382 assert(oopDesc::is_oop(obj, true), "Not an oop at " PTR_FORMAT, p2i(cur)); 383 assert(obj->klass_or_null() != NULL, 384 "Unparsable heap at " PTR_FORMAT, p2i(cur)); 385 386 size_t size; 387 bool is_dead = is_obj_dead_with_size(obj, bitmap, &size); 388 389 cur += size; 390 if (!is_dead) { 391 // Process live object's references. 392 393 // Non-objArrays are usually marked imprecise at the object 394 // start, in which case we need to iterate over them in full. 395 // objArrays are precisely marked, but can still be iterated 396 // over in full if completely covered. 397 if (!obj->is_objArray() || (((HeapWord*)obj) >= start && cur <= end)) { 398 obj->oop_iterate(cl); 399 } else { 400 obj->oop_iterate(cl, mr); 401 } 402 } 403 } while (cur < end); 404 405 return true; 406 } 407 408 #endif // SHARE_VM_GC_G1_HEAPREGION_INLINE_HPP