/*
 * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_PARALLEL_PSPROMOTIONMANAGER_INLINE_HPP
#define SHARE_VM_GC_PARALLEL_PSPROMOTIONMANAGER_INLINE_HPP

#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/parMarkBitMap.inline.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psPromotionLAB.inline.hpp"
#include "gc/parallel/psPromotionManager.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "logging/log.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"

inline PSPromotionManager* PSPromotionManager::manager_array(uint index) {
  assert(_manager_array != NULL, "access of NULL manager_array");
  assert(index <= ParallelGCThreads, "out of range manager_array access");
  return &_manager_array[index];
}

template <class T>
inline void PSPromotionManager::push_depth(T* p) {
  claimed_stack_depth()->push(p);
}

template <class T>
inline void PSPromotionManager::claim_or_forward_internal_depth(T* p) {
  if (p != NULL) { // XXX: error if p != NULL here
    oop o = RawAccess<OOP_NOT_NULL>::oop_load(p);
    if (o->is_forwarded()) {
      o = o->forwardee();
      // Card mark
      if (PSScavenge::is_obj_in_young(o)) {
        PSScavenge::card_table()->inline_write_ref_field_gc(p, o);
      }
      RawAccess<OOP_NOT_NULL>::oop_store(p, o);
    } else {
      push_depth(p);
    }
  }
}

template <class T>
inline void PSPromotionManager::claim_or_forward_depth(T* p) {
  assert(should_scavenge(p, true), "revisiting object?");
  assert(ParallelScavengeHeap::heap()->is_in(p), "pointer outside heap");

  claim_or_forward_internal_depth(p);
}

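// Reports a single promotion to the GC tracer. 'lab' is the PLAB the object
// was copied into, or NULL when it was allocated directly in the target
// space; a NULL new_obj (failed allocation) is ignored.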
inline void PSPromotionManager::promotion_trace_event(oop new_obj, oop old_obj,
                                                      size_t obj_size,
                                                      uint age, bool tenured,
                                                      const PSPromotionLAB* lab) {
  // Skip if memory allocation failed
  if (new_obj != NULL) {
    const ParallelScavengeTracer* gc_tracer = PSScavenge::gc_tracer();

    if (lab != NULL) {
      // Promotion of object through newly allocated PLAB
      if (gc_tracer->should_report_promotion_in_new_plab_event()) {
        size_t obj_bytes = obj_size * HeapWordSize;
        size_t lab_size = lab->capacity();
        gc_tracer->report_promotion_in_new_plab_event(old_obj->klass(), obj_bytes,
                                                      age, tenured, lab_size);
      }
    } else {
      // Promotion of object directly to heap
      if (gc_tracer->should_report_promotion_outside_plab_event()) {
        size_t obj_bytes = obj_size * HeapWordSize;
        gc_tracer->report_promotion_outside_plab_event(old_obj->klass(), obj_bytes,
                                                       age, tenured);
      }
    }
  }
}

inline void PSPromotionManager::push_contents(oop obj) {
  obj->ps_push_contents(this);
}
//
// This method is pretty bulky. It would be nice to split it up
// into smaller submethods, but we need to be careful not to hurt
// performance.
//
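// Overview of the promotion path below: if the object is not yet forwarded,
// pick a destination based on its age (young PLAB or to-space while under the
// tenuring threshold, otherwise an old-gen PLAB or the old gen directly),
// copy the object, then try to CAS the forwarding pointer into the old
// header. The winner of that race pushes the copy's contents (or queues it in
// chunks, for large object arrays); a loser unallocates or fills its now-dead
// copy and uses the winner's forwardee instead.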
template<bool promote_immediately>
inline oop PSPromotionManager::copy_to_survivor_space(oop o) {
  assert(should_scavenge(&o), "Sanity");

  oop new_obj = NULL;

  // NOTE! We must be very careful with any methods that access the mark
  // in o. There may be multiple threads racing on it, and it may be forwarded
  // at any time. Do not use oop methods for accessing the mark!
  markOop test_mark = o->mark();

  // The same test as "o->is_forwarded()"
  if (!test_mark->is_marked()) {
    bool new_obj_is_tenured = false;
    size_t new_obj_size = o->size();

    // Find the object's age, MT safe.
    uint age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
      test_mark->displaced_mark_helper()->age() : test_mark->age();

    if (!promote_immediately) {
      // Try allocating obj in to-space (unless too old)
      if (age < PSScavenge::tenuring_threshold()) {
        new_obj = (oop) _young_lab.allocate(new_obj_size);
        if (new_obj == NULL && !_young_gen_is_full) {
          // Do we allocate directly, or flush and refill?
          if (new_obj_size > (YoungPLABSize / 2)) {
            // Allocate this object directly
            new_obj = (oop)young_space()->cas_allocate(new_obj_size);
            promotion_trace_event(new_obj, o, new_obj_size, age, false, NULL);
          } else {
            // Flush and fill
            _young_lab.flush();

            HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
            if (lab_base != NULL) {
              _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
              // Try the young lab allocation again.
              new_obj = (oop) _young_lab.allocate(new_obj_size);
              promotion_trace_event(new_obj, o, new_obj_size, age, false, &_young_lab);
            } else {
              _young_gen_is_full = true;
            }
          }
        }
      }
    }

    // Otherwise try allocating obj tenured
    if (new_obj == NULL) {
#ifndef PRODUCT
      if (ParallelScavengeHeap::heap()->promotion_should_fail()) {
        return oop_promotion_failed(o, test_mark);
      }
#endif // #ifndef PRODUCT

      new_obj = (oop) _old_lab.allocate(new_obj_size);
      new_obj_is_tenured = true;

      if (new_obj == NULL) {
        if (!_old_gen_is_full) {
          // Do we allocate directly, or flush and refill?
          if (new_obj_size > (OldPLABSize / 2)) {
            // Allocate this object directly
            new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
            promotion_trace_event(new_obj, o, new_obj_size, age, true, NULL);
          } else {
            // Flush and fill
            _old_lab.flush();

            HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize);
            if (lab_base != NULL) {
#ifdef ASSERT
              // Delay the initialization of the promotion lab (plab).
              // This exposes uninitialized plabs to card table processing.
              if (GCWorkerDelayMillis > 0) {
                os::sleep(Thread::current(), GCWorkerDelayMillis, false);
              }
#endif
              _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
              // Try the old lab allocation again.
              new_obj = (oop) _old_lab.allocate(new_obj_size);
              promotion_trace_event(new_obj, o, new_obj_size, age, true, &_old_lab);
            }
          }
        }

        // This is the promotion failed test, and code handling.
        // The code belongs here for two reasons. It is slightly
        // different than the code below, and cannot share the
        // CAS testing code. Keeping the code here also minimizes
        // the impact on the common case fast path code.

        if (new_obj == NULL) {
          _old_gen_is_full = true;
          return oop_promotion_failed(o, test_mark);
        }
      }
    }

    assert(new_obj != NULL, "allocation should have succeeded");

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);

    // Now we have to CAS in the header.
    if (o->cas_forward_to(new_obj, test_mark)) {
      // We won any races, we "own" this object.
      assert(new_obj == o->forwardee(), "Sanity");

      // Increment age if obj still in new generation. Now that
      // we're dealing with a markOop that cannot change, it is
      // okay to use the non mt safe oop methods.
      if (!new_obj_is_tenured) {
        new_obj->incr_age();
        assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
      }

      // Do the size comparison first with new_obj_size, which we
      // already have. Hopefully, only a few objects are larger than
      // _min_array_size_for_chunking, and most of them will be arrays.
      // So, the is_objArray() test would be very infrequent.
      if (new_obj_size > _min_array_size_for_chunking &&
          new_obj->is_objArray() &&
          PSChunkLargeArrays) {
        // we'll chunk it
        oop* const masked_o = mask_chunked_array_oop(o);
        push_depth(masked_o);
        TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes);
      } else {
        // we'll just push its contents
        push_contents(new_obj);
      }
    } else {
      // We lost, someone else "owns" this object
      guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");

      // Try to deallocate the space. If it was directly allocated we cannot
      // deallocate it, so we have to test. If the deallocation fails,
      // overwrite with a filler object.
      if (new_obj_is_tenured) {
        if (!_old_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
          CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
        }
      } else if (!_young_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
        CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
      }

      // don't update this before the unallocation!
      new_obj = o->forwardee();
    }
  } else {
    assert(o->is_forwarded(), "Sanity");
    new_obj = o->forwardee();
  }

  // This code must come after the CAS test, or it will print incorrect
  // information.
  log_develop_trace(gc, scavenge)("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
                                  should_scavenge(&new_obj) ? "copying" : "tenuring",
                                  new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj), new_obj->size());

  return new_obj;
}

// Attempt to "claim" oop at p via CAS, push the new obj if successful
// This version tests the oop* to make sure it is within the heap before
// attempting marking.
template <class T, bool promote_immediately>
inline void PSPromotionManager::copy_and_push_safe_barrier(T* p) {
  assert(should_scavenge(p, true), "revisiting object?");

  oop o = RawAccess<OOP_NOT_NULL>::oop_load(p);
  oop new_obj = o->is_forwarded()
        ? o->forwardee()
        : copy_to_survivor_space<promote_immediately>(o);

  // This code must come after the CAS test, or it will print incorrect
  // information.
  if (log_develop_is_enabled(Trace, gc, scavenge) && o->is_forwarded()) {
    log_develop_trace(gc, scavenge)("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
                                    "forwarding",
                                    new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj), new_obj->size());
  }

  RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);

  // We cannot mark without test, as some code passes us pointers
  // that are outside the heap. These pointers are either from roots
  // or from metadata.
  if ((!PSScavenge::is_obj_in_young((HeapWord*)p)) &&
      ParallelScavengeHeap::heap()->is_in_reserved(p)) {
    if (PSScavenge::is_obj_in_young(new_obj)) {
      PSScavenge::card_table()->inline_write_ref_field_gc(p, new_obj);
    }
  }
}

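// Dispatch for a task popped from the depth-first work queue. A "masked" entry
// is a large object array queued by copy_to_survivor_space above and is
// processed in chunks via process_array_chunk(); ordinary entries are plain
// (possibly narrow) oop locations that are claimed and pushed as usual.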
inline void PSPromotionManager::process_popped_location_depth(StarTask p) {
  if (is_oop_masked(p)) {
    assert(PSChunkLargeArrays, "invariant");
    oop const old = unmask_chunked_array_oop(p);
    process_array_chunk(old);
  } else {
    if (p.is_narrow()) {
      assert(UseCompressedOops, "Error");
      copy_and_push_safe_barrier<narrowOop, /*promote_immediately=*/false>(p);
    } else {
      copy_and_push_safe_barrier<oop, /*promote_immediately=*/false>(p);
    }
  }
}

inline bool PSPromotionManager::steal_depth(int queue_num, int* seed, StarTask& t) {
  return stack_array_depth()->steal(queue_num, seed, t);
}

#if TASKQUEUE_STATS
void PSPromotionManager::record_steal(StarTask& p) {
  if (is_oop_masked(p)) {
    ++_masked_steals;
  }
}
#endif // TASKQUEUE_STATS

#endif // SHARE_VM_GC_PARALLEL_PSPROMOTIONMANAGER_INLINE_HPP