1 /* 2 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_VM_GC_G1_HEAPREGION_INLINE_HPP 26 #define SHARE_VM_GC_G1_HEAPREGION_INLINE_HPP 27 28 #include "gc/g1/g1BlockOffsetTable.inline.hpp" 29 #include "gc/g1/g1CollectedHeap.inline.hpp" 30 #include "gc/g1/heapRegion.hpp" 31 #include "gc/shared/space.hpp" 32 #include "oops/oop.inline.hpp" 33 #include "runtime/atomic.hpp" 34 35 inline HeapWord* G1ContiguousSpace::allocate_impl(size_t min_word_size, 36 size_t desired_word_size, 37 size_t* actual_size) { 38 HeapWord* obj = top(); 39 size_t available = pointer_delta(end(), obj); 40 size_t want_to_allocate = MIN2(available, desired_word_size); 41 if (want_to_allocate >= min_word_size) { 42 HeapWord* new_top = obj + want_to_allocate; 43 set_top(new_top); 44 assert(is_aligned(obj) && is_aligned(new_top), "checking alignment"); 45 *actual_size = want_to_allocate; 46 return obj; 47 } else { 48 return NULL; 49 } 50 } 51 52 inline HeapWord* G1ContiguousSpace::par_allocate_impl(size_t min_word_size, 53 size_t desired_word_size, 54 size_t* actual_size) { 55 do { 56 HeapWord* obj = top(); 57 size_t available = pointer_delta(end(), obj); 58 size_t want_to_allocate = MIN2(available, desired_word_size); 59 if (want_to_allocate >= min_word_size) { 60 HeapWord* new_top = obj + want_to_allocate; 61 HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj); 62 // result can be one of two: 63 // the old top value: the exchange succeeded 64 // otherwise: the new value of the top is returned. 65 if (result == obj) { 66 assert(is_aligned(obj) && is_aligned(new_top), "checking alignment"); 67 *actual_size = want_to_allocate; 68 return obj; 69 } 70 } else { 71 return NULL; 72 } 73 } while (true); 74 } 75 76 inline HeapWord* G1ContiguousSpace::allocate(size_t min_word_size, 77 size_t desired_word_size, 78 size_t* actual_size) { 79 HeapWord* res = allocate_impl(min_word_size, desired_word_size, actual_size); 80 if (res != NULL) { 81 _bot_part.alloc_block(res, *actual_size); 82 } 83 return res; 84 } 85 86 inline HeapWord* G1ContiguousSpace::allocate(size_t word_size) { 87 size_t temp; 88 return allocate(word_size, word_size, &temp); 89 } 90 91 inline HeapWord* G1ContiguousSpace::par_allocate(size_t word_size) { 92 size_t temp; 93 return par_allocate(word_size, word_size, &temp); 94 } 95 96 // Because of the requirement of keeping "_offsets" up to date with the 97 // allocations, we sequentialize these with a lock. Therefore, best if 98 // this is used for larger LAB allocations only. 99 inline HeapWord* G1ContiguousSpace::par_allocate(size_t min_word_size, 100 size_t desired_word_size, 101 size_t* actual_size) { 102 MutexLocker x(&_par_alloc_lock); 103 return allocate(min_word_size, desired_word_size, actual_size); 104 } 105 106 inline HeapWord* G1ContiguousSpace::block_start(const void* p) { 107 return _bot_part.block_start(p); 108 } 109 110 inline HeapWord* 111 G1ContiguousSpace::block_start_const(const void* p) const { 112 return _bot_part.block_start_const(p); 113 } 114 115 inline bool HeapRegion::is_obj_dead_with_size(const oop obj, G1CMBitMapRO* prev_bitmap, size_t* size) const { 116 HeapWord* addr = (HeapWord*) obj; 117 118 assert(addr < top(), "must be"); 119 assert(!is_archive(), "Archive regions should not have references into interesting regions."); 120 assert(!is_humongous(), "Humongous objects not handled here"); 121 bool obj_is_dead = is_obj_dead(obj, prev_bitmap); 122 123 if (ClassUnloadingWithConcurrentMark && obj_is_dead) { 124 assert(!block_is_obj(addr), "must be"); 125 *size = block_size_using_bitmap(addr, prev_bitmap); 126 } else { 127 assert(block_is_obj(addr), "must be"); 128 *size = obj->size(); 129 } 130 return obj_is_dead; 131 } 132 133 inline bool 134 HeapRegion::block_is_obj(const HeapWord* p) const { 135 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 136 137 if (!this->is_in(p)) { 138 assert(is_continues_humongous(), "This case can only happen for humongous regions"); 139 return (p == humongous_start_region()->bottom()); 140 } 141 if (ClassUnloadingWithConcurrentMark) { 142 return !g1h->is_obj_dead(oop(p), this); 143 } 144 return p < top(); 145 } 146 147 inline size_t HeapRegion::block_size_using_bitmap(const HeapWord* addr, const G1CMBitMapRO* prev_bitmap) const { 148 assert(ClassUnloadingWithConcurrentMark, 149 "All blocks should be objects if class unloading isn't used, so this method should not be called. " 150 "HR: [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ") " 151 "addr: " PTR_FORMAT, 152 p2i(bottom()), p2i(top()), p2i(end()), p2i(addr)); 153 154 // Old regions' dead objects may have dead classes 155 // We need to find the next live object using the bitmap 156 HeapWord* next = prev_bitmap->getNextMarkedWordAddress(addr, prev_top_at_mark_start()); 157 158 assert(next > addr, "must get the next live object"); 159 return pointer_delta(next, addr); 160 } 161 162 inline bool HeapRegion::is_obj_dead(const oop obj, const G1CMBitMapRO* prev_bitmap) const { 163 assert(is_in_reserved(obj), "Object " PTR_FORMAT " must be in region", p2i(obj)); 164 return !obj_allocated_since_prev_marking(obj) && !prev_bitmap->isMarked((HeapWord*)obj); 165 } 166 167 inline size_t HeapRegion::block_size(const HeapWord *addr) const { 168 if (addr == top()) { 169 return pointer_delta(end(), addr); 170 } 171 172 if (block_is_obj(addr)) { 173 return oop(addr)->size(); 174 } 175 176 return block_size_using_bitmap(addr, G1CollectedHeap::heap()->concurrent_mark()->prevMarkBitMap()); 177 } 178 179 inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t min_word_size, 180 size_t desired_word_size, 181 size_t* actual_word_size) { 182 assert(is_young(), "we can only skip BOT updates on young regions"); 183 return par_allocate_impl(min_word_size, desired_word_size, actual_word_size); 184 } 185 186 inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t word_size) { 187 size_t temp; 188 return allocate_no_bot_updates(word_size, word_size, &temp); 189 } 190 191 inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t min_word_size, 192 size_t desired_word_size, 193 size_t* actual_word_size) { 194 assert(is_young(), "we can only skip BOT updates on young regions"); 195 return allocate_impl(min_word_size, desired_word_size, actual_word_size); 196 } 197 198 inline void HeapRegion::note_start_of_marking() { 199 _next_marked_bytes = 0; 200 _next_top_at_mark_start = top(); 201 } 202 203 inline void HeapRegion::note_end_of_marking() { 204 _prev_top_at_mark_start = _next_top_at_mark_start; 205 _prev_marked_bytes = _next_marked_bytes; 206 _next_marked_bytes = 0; 207 } 208 209 inline void HeapRegion::note_start_of_copying(bool during_initial_mark) { 210 if (is_survivor()) { 211 // This is how we always allocate survivors. 212 assert(_next_top_at_mark_start == bottom(), "invariant"); 213 } else { 214 if (during_initial_mark) { 215 // During initial-mark we'll explicitly mark any objects on old 216 // regions that are pointed to by roots. Given that explicit 217 // marks only make sense under NTAMS it'd be nice if we could 218 // check that condition if we wanted to. Given that we don't 219 // know where the top of this region will end up, we simply set 220 // NTAMS to the end of the region so all marks will be below 221 // NTAMS. We'll set it to the actual top when we retire this region. 222 _next_top_at_mark_start = end(); 223 } else { 224 // We could have re-used this old region as to-space over a 225 // couple of GCs since the start of the concurrent marking 226 // cycle. This means that [bottom,NTAMS) will contain objects 227 // copied up to and including initial-mark and [NTAMS, top) 228 // will contain objects copied during the concurrent marking cycle. 229 assert(top() >= _next_top_at_mark_start, "invariant"); 230 } 231 } 232 } 233 234 inline void HeapRegion::note_end_of_copying(bool during_initial_mark) { 235 if (is_survivor()) { 236 // This is how we always allocate survivors. 237 assert(_next_top_at_mark_start == bottom(), "invariant"); 238 } else { 239 if (during_initial_mark) { 240 // See the comment for note_start_of_copying() for the details 241 // on this. 242 assert(_next_top_at_mark_start == end(), "pre-condition"); 243 _next_top_at_mark_start = top(); 244 } else { 245 // See the comment for note_start_of_copying() for the details 246 // on this. 247 assert(top() >= _next_top_at_mark_start, "invariant"); 248 } 249 } 250 } 251 252 inline bool HeapRegion::in_collection_set() const { 253 return G1CollectedHeap::heap()->is_in_cset(this); 254 } 255 256 template <class Closure, bool is_gc_active> 257 bool HeapRegion::do_oops_on_card_in_humongous(MemRegion mr, 258 Closure* cl, 259 G1CollectedHeap* g1h) { 260 assert(is_humongous(), "precondition"); 261 HeapRegion* sr = humongous_start_region(); 262 oop obj = oop(sr->bottom()); 263 264 // If concurrent and klass_or_null is NULL, then space has been 265 // allocated but the object has not yet been published by setting 266 // the klass. That can only happen if the card is stale. However, 267 // we've already set the card clean, so we must return failure, 268 // since the allocating thread could have performed a write to the 269 // card that might be missed otherwise. 270 if (!is_gc_active && (obj->klass_or_null_acquire() == NULL)) { 271 return false; 272 } 273 274 // We have a well-formed humongous object at the start of sr. 275 // Only filler objects follow a humongous object in the containing 276 // regions, and we can ignore those. So only process the one 277 // humongous object. 278 if (!g1h->is_obj_dead(obj, sr)) { 279 if (obj->is_objArray() || (sr->bottom() < mr.start())) { 280 // objArrays are always marked precisely, so limit processing 281 // with mr. Non-objArrays might be precisely marked, and since 282 // it's humongous it's worthwhile avoiding full processing. 283 // However, the card could be stale and only cover filler 284 // objects. That should be rare, so not worth checking for; 285 // instead let it fall out from the bounded iteration. 286 obj->oop_iterate(cl, mr); 287 } else { 288 // If obj is not an objArray and mr contains the start of the 289 // obj, then this could be an imprecise mark, and we need to 290 // process the entire object. 291 obj->oop_iterate(cl); 292 } 293 } 294 return true; 295 } 296 297 template <bool is_gc_active, class Closure> 298 bool HeapRegion::oops_on_card_seq_iterate_careful(MemRegion mr, 299 Closure* cl) { 300 assert(MemRegion(bottom(), end()).contains(mr), "Card region not in heap region"); 301 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 302 303 // Special handling for humongous regions. 304 if (is_humongous()) { 305 return do_oops_on_card_in_humongous<Closure, is_gc_active>(mr, cl, g1h); 306 } 307 assert(is_old(), "precondition"); 308 309 // Because mr has been trimmed to what's been allocated in this 310 // region, the parts of the heap that are examined here are always 311 // parsable; there's no need to use klass_or_null to detect 312 // in-progress allocation. 313 314 // Cache the boundaries of the memory region in some const locals 315 HeapWord* const start = mr.start(); 316 HeapWord* const end = mr.end(); 317 318 // Find the obj that extends onto mr.start(). 319 // Update BOT as needed while finding start of (possibly dead) 320 // object containing the start of the region. 321 HeapWord* cur = block_start(start); 322 323 #ifdef ASSERT 324 { 325 assert(cur <= start, 326 "cur: " PTR_FORMAT ", start: " PTR_FORMAT, p2i(cur), p2i(start)); 327 HeapWord* next = cur + block_size(cur); 328 assert(start < next, 329 "start: " PTR_FORMAT ", next: " PTR_FORMAT, p2i(start), p2i(next)); 330 } 331 #endif 332 333 G1CMBitMapRO* bitmap = g1h->concurrent_mark()->prevMarkBitMap(); 334 do { 335 oop obj = oop(cur); 336 assert(obj->is_oop(true), "Not an oop at " PTR_FORMAT, p2i(cur)); 337 assert(obj->klass_or_null() != NULL, 338 "Unparsable heap at " PTR_FORMAT, p2i(cur)); 339 340 size_t size; 341 bool is_dead = is_obj_dead_with_size(obj, bitmap, &size); 342 343 cur += size; 344 if (!is_dead) { 345 // Process live object's references. 346 347 // Non-objArrays are usually marked imprecise at the object 348 // start, in which case we need to iterate over them in full. 349 // objArrays are precisely marked, but can still be iterated 350 // over in full if completely covered. 351 if (!obj->is_objArray() || (((HeapWord*)obj) >= start && cur <= end)) { 352 obj->oop_iterate(cl); 353 } else { 354 obj->oop_iterate(cl, mr); 355 } 356 } 357 } while (cur < end); 358 359 return true; 360 } 361 362 #endif // SHARE_VM_GC_G1_HEAPREGION_INLINE_HPP