/*
 * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP

#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
#include "gc_implementation/parallelScavenge/psPromotionLAB.inline.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "oops/oop.psgc.inline.hpp"

inline PSPromotionManager* PSPromotionManager::manager_array(int index) {
  assert(_manager_array != NULL, "access of NULL manager_array");
  assert(index >= 0 && index <= (int)ParallelGCThreads, "out of range manager_array access");
  return &_manager_array[index];
}

template <class T>
inline void PSPromotionManager::claim_or_forward_internal_depth(T* p) {
  if (p != NULL) { // XXX: error if p != NULL here
    oop o = oopDesc::load_decode_heap_oop_not_null(p);
    if (o->is_forwarded()) {
      o = o->forwardee();
      // Card mark
      if (PSScavenge::is_obj_in_young(o)) {
        PSScavenge::card_table()->inline_write_ref_field_gc(p, o);
      }
      oopDesc::encode_store_heap_oop_not_null(p, o);
    } else {
      push_depth(p);
    }
  }
}

template <class T>
inline void PSPromotionManager::claim_or_forward_depth(T* p) {
  assert(PSScavenge::should_scavenge(p, true), "revisiting object?");
  assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap,
         "Sanity");
  assert(Universe::heap()->is_in(p), "pointer outside heap");

  claim_or_forward_internal_depth(p);
}

//
// This method is pretty bulky. It would be nice to split it up
// into smaller submethods, but we need to be careful not to hurt
// performance.
//
template<bool promote_immediately>
oop PSPromotionManager::copy_to_survivor_space(oop o) {
  assert(PSScavenge::should_scavenge(&o), "Sanity");

  oop new_obj = NULL;

  // NOTE! We must be very careful with any methods that access the mark
  // in o. There may be multiple threads racing on it, and it may be forwarded
  // at any time. Do not use oop methods for accessing the mark!
  markOop test_mark = o->mark();

  // The same test as "o->is_forwarded()"
  if (!test_mark->is_marked()) {
    bool new_obj_is_tenured = false;
    size_t new_obj_size = o->size();
    // Find the object's age, MT safe.
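    // Explanatory note: when the object is locked, its real header has been
    // displaced into a lock record or monitor, so the age bits must be
    // fetched from the displaced header. The value is read through the
    // cached test_mark rather than by re-reading o->mark(), which another
    // GC worker may already have replaced with a forwarding pointer
    // (see the NOTE on mark access above).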
    uint age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
      test_mark->displaced_mark_helper()->age() : test_mark->age();

    if (!promote_immediately) {
      // Try allocating obj in to-space (unless too old)
      if (age < PSScavenge::tenuring_threshold()) {
        new_obj = (oop) _young_lab.allocate(new_obj_size);
        if (new_obj == NULL && !_young_gen_is_full) {
          // Do we allocate directly, or flush and refill?
          if (new_obj_size > (YoungPLABSize / 2)) {
            // Allocate this object directly
            PSScavenge::_gc_tracer.report_promotion_to_new_plab(o, age, false, new_obj_size);
            new_obj = (oop)young_space()->cas_allocate(new_obj_size);
          } else {
            // Flush and fill
            _young_lab.flush();

            HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
            if (lab_base != NULL) {
              _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
              // Try the young lab allocation again.
              PSScavenge::_gc_tracer.report_promotion_to_new_plab(o, age, false, _young_lab.capacity());
              new_obj = (oop) _young_lab.allocate(new_obj_size);
            } else {
              _young_gen_is_full = true;
            }
          }
        }
      }
    }

    // Otherwise try allocating obj tenured
    if (new_obj == NULL) {
#ifndef PRODUCT
      if (Universe::heap()->promotion_should_fail()) {
        return oop_promotion_failed(o, test_mark);
      }
#endif  // #ifndef PRODUCT

      new_obj = (oop) _old_lab.allocate(new_obj_size);
      new_obj_is_tenured = true;

      if (new_obj == NULL) {
        if (!_old_gen_is_full) {
          // Do we allocate directly, or flush and refill?
          if (new_obj_size > (OldPLABSize / 2)) {
            // Allocate this object directly
            PSScavenge::_gc_tracer.report_promotion_to_new_plab(o, age, true, new_obj_size);
            new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
          } else {
            // Flush and fill
            _old_lab.flush();

            HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize);
            if (lab_base != NULL) {
#ifdef ASSERT
              // Delay the initialization of the promotion lab (plab).
              // This exposes uninitialized plabs to card table processing.
              if (GCWorkerDelayMillis > 0) {
                os::sleep(Thread::current(), GCWorkerDelayMillis, false);
              }
#endif
              _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
              PSScavenge::_gc_tracer.report_promotion_to_new_plab(o, age, true, _old_lab.capacity());
              // Try the old lab allocation again.
              new_obj = (oop) _old_lab.allocate(new_obj_size);
            }
          }
        }

        // This is the promotion failed test, and code handling.
        // The code belongs here for two reasons. It is slightly
        // different from the code below, and cannot share the
        // CAS testing code. Keeping the code here also minimizes
        // the impact on the common case fast path code.

        if (new_obj == NULL) {
          _old_gen_is_full = true;
          return oop_promotion_failed(o, test_mark);
        }
      }
    }

    assert(new_obj != NULL, "allocation should have succeeded");

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);

    // Now we have to CAS in the header.
    if (o->cas_forward_to(new_obj, test_mark)) {
      // We won any races, we "own" this object.
      assert(new_obj == o->forwardee(), "Sanity");

      // Increment age if obj still in new generation. Now that
      // we're dealing with a markOop that cannot change, it is
      // okay to use the non mt safe oop methods.
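      // Explanatory note: winning the CAS makes o's mark the forwarding
      // pointer for the remainder of this (stop-the-world) scavenge.
      // Mutators are suspended and losing workers only read o->forwardee(),
      // so no other thread mutates new_obj's header while its age is bumped.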
      if (!new_obj_is_tenured) {
        new_obj->incr_age();
        assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
      }

      // Do the size comparison first with new_obj_size, which we
      // already have. Hopefully, only a few objects are larger than
      // _min_array_size_for_chunking, and most of them will be arrays.
      // So, the is_objArray() test would be very infrequent.
      if (new_obj_size > _min_array_size_for_chunking &&
          new_obj->is_objArray() &&
          PSChunkLargeArrays) {
        // we'll chunk it
        oop* const masked_o = mask_chunked_array_oop(o);
        push_depth(masked_o);
        TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes);
      } else {
        // we'll just push its contents
        new_obj->push_contents(this);
      }
    } else {
      // We lost, someone else "owns" this object.
      guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");

      // Try to deallocate the space. If it was directly allocated we cannot
      // deallocate it, so we have to test. If the deallocation fails,
      // overwrite with a filler object.
      if (new_obj_is_tenured) {
        if (!_old_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
          CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
        }
      } else if (!_young_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
        CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
      }

      // don't update this before the unallocation!
      new_obj = o->forwardee();
    }
  } else {
    assert(o->is_forwarded(), "Sanity");
    new_obj = o->forwardee();
  }

#ifndef PRODUCT
  // This code must come after the CAS test, or it will print incorrect
  // information.
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
                           PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring",
                           new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj), new_obj->size());
  }
#endif

  return new_obj;
}


inline void PSPromotionManager::process_popped_location_depth(StarTask p) {
  if (is_oop_masked(p)) {
    assert(PSChunkLargeArrays, "invariant");
    oop const old = unmask_chunked_array_oop(p);
    process_array_chunk(old);
  } else {
    if (p.is_narrow()) {
      assert(UseCompressedOops, "Error");
      PSScavenge::copy_and_push_safe_barrier<narrowOop, /*promote_immediately=*/false>(this, p);
    } else {
      PSScavenge::copy_and_push_safe_barrier<oop, /*promote_immediately=*/false>(this, p);
    }
  }
}

#if TASKQUEUE_STATS
void PSPromotionManager::record_steal(StarTask& p) {
  if (is_oop_masked(p)) {
    ++_masked_steals;
  }
}
#endif // TASKQUEUE_STATS

#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP