1 /* 2 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_GC_G1_HEAPREGION_INLINE_HPP 26 #define SHARE_GC_G1_HEAPREGION_INLINE_HPP 27 28 #include "gc/g1/g1BlockOffsetTable.inline.hpp" 29 #include "gc/g1/g1CollectedHeap.inline.hpp" 30 #include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp" 31 #include "gc/g1/heapRegion.hpp" 32 #include "gc/shared/space.hpp" 33 #include "oops/oop.inline.hpp" 34 #include "runtime/atomic.hpp" 35 #include "runtime/prefetch.inline.hpp" 36 #include "runtime/mutexLocker.inline.hpp" 37 #include "utilities/align.hpp" 38 39 inline HeapWord* G1ContiguousSpace::allocate_impl(size_t min_word_size, 40 size_t desired_word_size, 41 size_t* actual_size) { 42 HeapWord* obj = top(); 43 size_t available = pointer_delta(end(), obj); 44 size_t want_to_allocate = MIN2(available, desired_word_size); 45 if (want_to_allocate >= min_word_size) { 46 HeapWord* new_top = obj + want_to_allocate; 47 set_top(new_top); 48 assert(is_aligned(obj) && is_aligned(new_top), "checking alignment"); 49 *actual_size = want_to_allocate; 50 return obj; 51 } else { 52 return NULL; 53 } 54 } 55 56 inline HeapWord* G1ContiguousSpace::par_allocate_impl(size_t min_word_size, 57 size_t desired_word_size, 58 size_t* actual_size) { 59 do { 60 HeapWord* obj = top(); 61 size_t available = pointer_delta(end(), obj); 62 size_t want_to_allocate = MIN2(available, desired_word_size); 63 if (want_to_allocate >= min_word_size) { 64 HeapWord* new_top = obj + want_to_allocate; 65 HeapWord* result = Atomic::cmpxchg(new_top, top_addr(), obj); 66 // result can be one of two: 67 // the old top value: the exchange succeeded 68 // otherwise: the new value of the top is returned. 69 if (result == obj) { 70 assert(is_aligned(obj) && is_aligned(new_top), "checking alignment"); 71 *actual_size = want_to_allocate; 72 return obj; 73 } 74 } else { 75 return NULL; 76 } 77 } while (true); 78 } 79 80 inline HeapWord* G1ContiguousSpace::allocate(size_t min_word_size, 81 size_t desired_word_size, 82 size_t* actual_size) { 83 HeapWord* res = allocate_impl(min_word_size, desired_word_size, actual_size); 84 if (res != NULL) { 85 _bot_part.alloc_block(res, *actual_size); 86 } 87 return res; 88 } 89 90 inline HeapWord* G1ContiguousSpace::allocate(size_t word_size) { 91 size_t temp; 92 return allocate(word_size, word_size, &temp); 93 } 94 95 inline HeapWord* G1ContiguousSpace::par_allocate(size_t word_size) { 96 size_t temp; 97 return par_allocate(word_size, word_size, &temp); 98 } 99 100 // Because of the requirement of keeping "_offsets" up to date with the 101 // allocations, we sequentialize these with a lock. Therefore, best if 102 // this is used for larger LAB allocations only. 103 inline HeapWord* G1ContiguousSpace::par_allocate(size_t min_word_size, 104 size_t desired_word_size, 105 size_t* actual_size) { 106 MutexLocker x(&_par_alloc_lock); 107 return allocate(min_word_size, desired_word_size, actual_size); 108 } 109 110 inline HeapWord* G1ContiguousSpace::block_start(const void* p) { 111 return _bot_part.block_start(p); 112 } 113 114 inline HeapWord* 115 G1ContiguousSpace::block_start_const(const void* p) const { 116 return _bot_part.block_start_const(p); 117 } 118 119 inline bool HeapRegion::is_obj_dead_with_size(const oop obj, const G1CMBitMap* const prev_bitmap, size_t* size) const { 120 HeapWord* addr = (HeapWord*) obj; 121 122 assert(addr < top(), "must be"); 123 assert(!is_closed_archive(), 124 "Closed archive regions should not have references into other regions"); 125 assert(!is_humongous(), "Humongous objects not handled here"); 126 bool obj_is_dead = is_obj_dead(obj, prev_bitmap); 127 128 if (ClassUnloadingWithConcurrentMark && obj_is_dead) { 129 assert(!block_is_obj(addr), "must be"); 130 *size = block_size_using_bitmap(addr, prev_bitmap); 131 } else { 132 assert(block_is_obj(addr), "must be"); 133 *size = obj->size(); 134 } 135 return obj_is_dead; 136 } 137 138 inline bool 139 HeapRegion::block_is_obj(const HeapWord* p) const { 140 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 141 142 if (!this->is_in(p)) { 143 assert(is_continues_humongous(), "This case can only happen for humongous regions"); 144 return (p == humongous_start_region()->bottom()); 145 } 146 if (ClassUnloadingWithConcurrentMark) { 147 return !g1h->is_obj_dead(oop(p), this); 148 } 149 return p < top(); 150 } 151 152 inline size_t HeapRegion::block_size_using_bitmap(const HeapWord* addr, const G1CMBitMap* const prev_bitmap) const { 153 assert(ClassUnloadingWithConcurrentMark, 154 "All blocks should be objects if class unloading isn't used, so this method should not be called. " 155 "HR: [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ") " 156 "addr: " PTR_FORMAT, 157 p2i(bottom()), p2i(top()), p2i(end()), p2i(addr)); 158 159 // Old regions' dead objects may have dead classes 160 // We need to find the next live object using the bitmap 161 HeapWord* next = prev_bitmap->get_next_marked_addr(addr, prev_top_at_mark_start()); 162 163 assert(next > addr, "must get the next live object"); 164 return pointer_delta(next, addr); 165 } 166 167 inline bool HeapRegion::is_obj_dead(const oop obj, const G1CMBitMap* const prev_bitmap) const { 168 assert(is_in_reserved(obj), "Object " PTR_FORMAT " must be in region", p2i(obj)); 169 return !obj_allocated_since_prev_marking(obj) && 170 !prev_bitmap->is_marked((HeapWord*)obj) && 171 !is_open_archive(); 172 } 173 174 inline size_t HeapRegion::block_size(const HeapWord *addr) const { 175 if (addr == top()) { 176 return pointer_delta(end(), addr); 177 } 178 179 if (block_is_obj(addr)) { 180 return oop(addr)->size(); 181 } 182 183 return block_size_using_bitmap(addr, G1CollectedHeap::heap()->concurrent_mark()->prev_mark_bitmap()); 184 } 185 186 inline void HeapRegion::complete_compaction() { 187 // Reset space and bot after compaction is complete if needed. 188 reset_after_compaction(); 189 if (used_region().is_empty()) { 190 reset_bot(); 191 } 192 193 // After a compaction the mark bitmap is invalid, so we must 194 // treat all objects as being inside the unmarked area. 195 zero_marked_bytes(); 196 init_top_at_mark_start(); 197 198 // Clear unused heap memory in debug builds. 199 if (ZapUnusedHeapArea) { 200 mangle_unused_area(); 201 } 202 } 203 204 template<typename ApplyToMarkedClosure> 205 inline void HeapRegion::apply_to_marked_objects(G1CMBitMap* bitmap, ApplyToMarkedClosure* closure) { 206 HeapWord* limit = scan_limit(); 207 HeapWord* next_addr = bottom(); 208 209 while (next_addr < limit) { 210 Prefetch::write(next_addr, PrefetchScanIntervalInBytes); 211 // This explicit is_marked check is a way to avoid 212 // some extra work done by get_next_marked_addr for 213 // the case where next_addr is marked. 214 if (bitmap->is_marked(next_addr)) { 215 oop current = oop(next_addr); 216 next_addr += closure->apply(current); 217 } else { 218 next_addr = bitmap->get_next_marked_addr(next_addr, limit); 219 } 220 } 221 222 assert(next_addr == limit, "Should stop the scan at the limit."); 223 } 224 225 inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t min_word_size, 226 size_t desired_word_size, 227 size_t* actual_word_size) { 228 assert(is_young(), "we can only skip BOT updates on young regions"); 229 return par_allocate_impl(min_word_size, desired_word_size, actual_word_size); 230 } 231 232 inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t word_size) { 233 size_t temp; 234 return allocate_no_bot_updates(word_size, word_size, &temp); 235 } 236 237 inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t min_word_size, 238 size_t desired_word_size, 239 size_t* actual_word_size) { 240 assert(is_young(), "we can only skip BOT updates on young regions"); 241 return allocate_impl(min_word_size, desired_word_size, actual_word_size); 242 } 243 244 inline void HeapRegion::note_start_of_marking() { 245 _next_marked_bytes = 0; 246 _next_top_at_mark_start = top(); 247 } 248 249 inline void HeapRegion::note_end_of_marking() { 250 _prev_top_at_mark_start = _next_top_at_mark_start; 251 _next_top_at_mark_start = bottom(); 252 _prev_marked_bytes = _next_marked_bytes; 253 _next_marked_bytes = 0; 254 } 255 256 inline bool HeapRegion::in_collection_set() const { 257 return G1CollectedHeap::heap()->is_in_cset(this); 258 } 259 260 template <class Closure, bool is_gc_active> 261 HeapWord* HeapRegion::do_oops_on_memregion_in_humongous(MemRegion mr, 262 Closure* cl, 263 G1CollectedHeap* g1h) { 264 assert(is_humongous(), "precondition"); 265 HeapRegion* sr = humongous_start_region(); 266 oop obj = oop(sr->bottom()); 267 268 // If concurrent and klass_or_null is NULL, then space has been 269 // allocated but the object has not yet been published by setting 270 // the klass. That can only happen if the card is stale. However, 271 // we've already set the card clean, so we must return failure, 272 // since the allocating thread could have performed a write to the 273 // card that might be missed otherwise. 274 if (!is_gc_active && (obj->klass_or_null_acquire() == NULL)) { 275 return NULL; 276 } 277 278 // We have a well-formed humongous object at the start of sr. 279 // Only filler objects follow a humongous object in the containing 280 // regions, and we can ignore those. So only process the one 281 // humongous object. 282 if (g1h->is_obj_dead(obj, sr)) { 283 // The object is dead. There can be no other object in this region, so return 284 // the end of that region. 285 return end(); 286 } 287 if (obj->is_objArray() || (sr->bottom() < mr.start())) { 288 // objArrays are always marked precisely, so limit processing 289 // with mr. Non-objArrays might be precisely marked, and since 290 // it's humongous it's worthwhile avoiding full processing. 291 // However, the card could be stale and only cover filler 292 // objects. That should be rare, so not worth checking for; 293 // instead let it fall out from the bounded iteration. 294 obj->oop_iterate(cl, mr); 295 return mr.end(); 296 } else { 297 // If obj is not an objArray and mr contains the start of the 298 // obj, then this could be an imprecise mark, and we need to 299 // process the entire object. 300 int size = obj->oop_iterate_size(cl); 301 // We have scanned to the end of the object, but since there can be no objects 302 // after this humongous object in the region, we can return the end of the 303 // region if it is greater. 304 return MAX2((HeapWord*)obj + size, mr.end()); 305 } 306 } 307 308 template <bool is_gc_active, class Closure> 309 HeapWord* HeapRegion::oops_on_memregion_seq_iterate_careful(MemRegion mr, 310 Closure* cl) { 311 assert(MemRegion(bottom(), end()).contains(mr), "Card region not in heap region"); 312 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 313 314 // Special handling for humongous regions. 315 if (is_humongous()) { 316 return do_oops_on_memregion_in_humongous<Closure, is_gc_active>(mr, cl, g1h); 317 } 318 assert(is_old() || is_archive(), "Wrongly trying to iterate over region %u type %s", _hrm_index, get_type_str()); 319 320 // Because mr has been trimmed to what's been allocated in this 321 // region, the parts of the heap that are examined here are always 322 // parsable; there's no need to use klass_or_null to detect 323 // in-progress allocation. 324 325 // Cache the boundaries of the memory region in some const locals 326 HeapWord* const start = mr.start(); 327 HeapWord* const end = mr.end(); 328 329 // Find the obj that extends onto mr.start(). 330 // Update BOT as needed while finding start of (possibly dead) 331 // object containing the start of the region. 332 HeapWord* cur = block_start(start); 333 334 #ifdef ASSERT 335 { 336 assert(cur <= start, 337 "cur: " PTR_FORMAT ", start: " PTR_FORMAT, p2i(cur), p2i(start)); 338 HeapWord* next = cur + block_size(cur); 339 assert(start < next, 340 "start: " PTR_FORMAT ", next: " PTR_FORMAT, p2i(start), p2i(next)); 341 } 342 #endif 343 344 const G1CMBitMap* const bitmap = g1h->concurrent_mark()->prev_mark_bitmap(); 345 while (true) { 346 oop obj = oop(cur); 347 assert(oopDesc::is_oop(obj, true), "Not an oop at " PTR_FORMAT, p2i(cur)); 348 assert(obj->klass_or_null() != NULL, 349 "Unparsable heap at " PTR_FORMAT, p2i(cur)); 350 351 size_t size; 352 bool is_dead = is_obj_dead_with_size(obj, bitmap, &size); 353 bool is_precise = false; 354 355 cur += size; 356 if (!is_dead) { 357 // Process live object's references. 358 359 // Non-objArrays are usually marked imprecise at the object 360 // start, in which case we need to iterate over them in full. 361 // objArrays are precisely marked, but can still be iterated 362 // over in full if completely covered. 363 if (!obj->is_objArray() || (((HeapWord*)obj) >= start && cur <= end)) { 364 obj->oop_iterate(cl); 365 } else { 366 obj->oop_iterate(cl, mr); 367 is_precise = true; 368 } 369 } 370 if (cur >= end) { 371 return is_precise ? end : cur; 372 } 373 } 374 } 375 376 #endif // SHARE_GC_G1_HEAPREGION_INLINE_HPP