/*
 * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/parallel/mutableSpace.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psScavenge.inline.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/memRegion.hpp"
#include "memory/padded.inline.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/instanceMirrorKlass.inline.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"

PaddedEnd<PSPromotionManager>* PSPromotionManager::_manager_array = NULL;
OopStarTaskQueueSet* PSPromotionManager::_stack_array_depth = NULL;
PSOldGen* PSPromotionManager::_old_gen = NULL;
MutableSpace* PSPromotionManager::_young_space = NULL;

void PSPromotionManager::initialize() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  _old_gen = heap->old_gen();
  _young_space = heap->young_gen()->to_space();

  // To prevent false sharing, we pad the PSPromotionManagers
  // and make sure that the first instance starts at a cache line.
  assert(_manager_array == NULL, "Attempt to initialize twice");
  _manager_array = PaddedArray<PSPromotionManager, mtGC>::create_unfreeable(ParallelGCThreads + 1);
  guarantee(_manager_array != NULL, "Could not initialize promotion manager");

  _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads);
  guarantee(_stack_array_depth != NULL, "Could not initialize promotion manager");

  // Create and register the PSPromotionManager(s) for the worker threads.
  for (uint i = 0; i < ParallelGCThreads; i++) {
    stack_array_depth()->register_queue(i, _manager_array[i].claimed_stack_depth());
  }
  // The VMThread gets its own PSPromotionManager, which is not available
  // for work stealing.
}

// Helper functions to get around the circular dependency between
// psScavenge.inline.hpp and psPromotionManager.inline.hpp.
bool PSPromotionManager::should_scavenge(oop* p, bool check_to_space) {
  return PSScavenge::should_scavenge(p, check_to_space);
}
bool PSPromotionManager::should_scavenge(narrowOop* p, bool check_to_space) {
  return PSScavenge::should_scavenge(p, check_to_space);
}

PSPromotionManager* PSPromotionManager::gc_thread_promotion_manager(uint index) {
  assert(index < ParallelGCThreads, "index out of range");
  assert(_manager_array != NULL, "Sanity");
  return &_manager_array[index];
}

PSPromotionManager* PSPromotionManager::vm_thread_promotion_manager() {
  assert(_manager_array != NULL, "Sanity");
  return &_manager_array[ParallelGCThreads];
}

void PSPromotionManager::pre_scavenge() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  _young_space = heap->young_gen()->to_space();

  for (uint i = 0; i < ParallelGCThreads + 1; i++) {
    manager_array(i)->reset();
  }
}

bool PSPromotionManager::post_scavenge(YoungGCTracer& gc_tracer) {
  bool promotion_failure_occurred = false;

  TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
  for (uint i = 0; i < ParallelGCThreads + 1; i++) {
    PSPromotionManager* manager = manager_array(i);
    assert(manager->claimed_stack_depth()->is_empty(), "should be empty");
    if (manager->_promotion_failed_info.has_failed()) {
      gc_tracer.report_promotion_failed(manager->_promotion_failed_info);
      promotion_failure_occurred = true;
    }
    manager->flush_labs();
  }
  return promotion_failure_occurred;
}

#if TASKQUEUE_STATS
void
PSPromotionManager::print_local_stats(outputStream* const out, uint i) const {
#define FMT " " SIZE_FORMAT_W(10)
  out->print_cr("%3u" FMT FMT FMT FMT, i, _masked_pushes, _masked_steals,
                _arrays_chunked, _array_chunks_processed);
#undef FMT
}

static const char* const pm_stats_hdr[] = {
  "    --------masked-------     arrays      array",
  "thr       push      steal    chunked     chunks",
  "--- ---------- ---------- ---------- ----------"
};

void
PSPromotionManager::print_taskqueue_stats() {
  if (!develop_log_is_enabled(Trace, gc, task, stats)) {
    return;
  }
  LogHandle(gc, task, stats) log;
  ResourceMark rm;
  outputStream* out = log.trace_stream();
  out->print_cr("== GC Tasks Stats, GC %3d",
                ParallelScavengeHeap::heap()->total_collections());

  TaskQueueStats totals;
  out->print("thr "); TaskQueueStats::print_header(1, out); out->cr();
  out->print("--- "); TaskQueueStats::print_header(2, out); out->cr();
  for (uint i = 0; i < ParallelGCThreads + 1; ++i) {
    TaskQueueStats& next = manager_array(i)->_claimed_stack_depth.stats;
    out->print("%3d ", i); next.print(out); out->cr();
    totals += next;
  }
  out->print("tot "); totals.print(out); out->cr();

  const uint hlines = sizeof(pm_stats_hdr) / sizeof(pm_stats_hdr[0]);
  for (uint i = 0; i < hlines; ++i) out->print_cr("%s", pm_stats_hdr[i]);
  for (uint i = 0; i < ParallelGCThreads + 1; ++i) {
    manager_array(i)->print_local_stats(out, i);
  }
}

void
PSPromotionManager::reset_stats() {
  claimed_stack_depth()->stats.reset();
  _masked_pushes = _masked_steals = 0;
  _arrays_chunked = _array_chunks_processed = 0;
}
#endif // TASKQUEUE_STATS

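// How the tuning in the constructor below typically works out. With the
// usual defaults (GCDrainStackTargetSize = 64, ParGCArrayScanChunk = 50;
// both are command-line flags, so a given run may use other values):
//
//   _target_stack_size           = MIN2(64, queue_size / 4)
//   _array_chunk_size            = 50
//   _min_array_size_for_chunking = 75   // 1.5x the chunk size
//
// That is, a partial drain stops once the claimed stack is down to the
// target, leaving entries for other threads to steal, and object arrays
// longer than _min_array_size_for_chunking are scanned in chunks (see
// process_array_chunk() below) rather than all at once.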
PSPromotionManager::PSPromotionManager() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  // We set the old lab's start array.
  _old_lab.set_start_array(old_gen()->start_array());

  uint queue_size;
  claimed_stack_depth()->initialize();
  queue_size = claimed_stack_depth()->max_elems();

  _totally_drain = (ParallelGCThreads == 1) || (GCDrainStackTargetSize == 0);
  if (_totally_drain) {
    _target_stack_size = 0;
  } else {
    // don't let the target stack size be more than 1/4 of the entries
    _target_stack_size = (uint) MIN2((uint) GCDrainStackTargetSize,
                                     (uint) (queue_size / 4));
  }

  _array_chunk_size = ParGCArrayScanChunk;
  // let's choose 1.5x the chunk size
  _min_array_size_for_chunking = 3 * _array_chunk_size / 2;

  reset();
}

void PSPromotionManager::reset() {
  assert(stacks_empty(), "reset of non-empty stack");

  // We need to get an assert in here to make sure the labs are always flushed.

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  // Do not prefill the LABs; this saves heap wastage.
  HeapWord* lab_base = young_space()->top();
  _young_lab.initialize(MemRegion(lab_base, (size_t)0));
  _young_gen_is_full = false;

  lab_base = old_gen()->object_space()->top();
  _old_lab.initialize(MemRegion(lab_base, (size_t)0));
  _old_gen_is_full = false;

  _promotion_failed_info.reset();

  TASKQUEUE_STATS_ONLY(reset_stats());
}

void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
  totally_drain = totally_drain || _totally_drain;

#ifdef ASSERT
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  MutableSpace* to_space = heap->young_gen()->to_space();
  MutableSpace* old_space = heap->old_gen()->object_space();
#endif /* ASSERT */

  OopStarTaskQueue* const tq = claimed_stack_depth();
  do {
    StarTask p;

    // Drain overflow stack first, so other threads can steal from
    // claimed stack while we work.
    while (tq->pop_overflow(p)) {
      process_popped_location_depth(p);
    }

    if (totally_drain) {
      while (tq->pop_local(p)) {
        process_popped_location_depth(p);
      }
    } else {
      while (tq->size() > _target_stack_size && tq->pop_local(p)) {
        process_popped_location_depth(p);
      }
    }
  } while ((totally_drain && !tq->taskqueue_empty()) || !tq->overflow_empty());

  assert(!totally_drain || tq->taskqueue_empty(), "Sanity");
  assert(totally_drain || tq->size() <= _target_stack_size, "Sanity");
  assert(tq->overflow_empty(), "Sanity");
}

void PSPromotionManager::flush_labs() {
  assert(stacks_empty(), "Attempt to flush lab with live stack");

  // If either promotion lab fills up, we can flush the
  // lab but not refill it, so check first.
  assert(!_young_lab.is_flushed() || _young_gen_is_full, "Sanity");
  if (!_young_lab.is_flushed())
    _young_lab.flush();

  assert(!_old_lab.is_flushed() || _old_gen_is_full, "Sanity");
  if (!_old_lab.is_flushed())
    _old_lab.flush();

  // Let PSScavenge know if we overflowed
  if (_young_gen_is_full) {
    PSScavenge::set_survivor_overflow(true);
  }
}

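// Chunked scanning of large object arrays, as implemented below: the length
// field of the from-space copy ("old", which is already forwarded) is reused
// as a cursor. Each call claims the tail [start, end) of the array, shrinks
// old's length to start and, while more than _min_array_size_for_chunking
// elements remain, re-pushes old with a mask bit set (mask_chunked_array_oop)
// so that whoever pops the entry routes it back here instead of treating it
// as an ordinary oop*. The final chunk restores the real length from the
// forwardee.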
template <class T> void PSPromotionManager::process_array_chunk_work(
                                                 oop obj,
                                                 int start, int end) {
  assert(start <= end, "invariant");
  T* const base      = (T*)objArrayOop(obj)->base();
  T* p               = base + start;
  T* const chunk_end = base + end;
  while (p < chunk_end) {
    if (PSScavenge::should_scavenge(p)) {
      claim_or_forward_depth(p);
    }
    ++p;
  }
}

void PSPromotionManager::process_array_chunk(oop old) {
  assert(PSChunkLargeArrays, "invariant");
  assert(old->is_objArray(), "invariant");
  assert(old->is_forwarded(), "invariant");

  TASKQUEUE_STATS_ONLY(++_array_chunks_processed);

  oop const obj = old->forwardee();

  int start;
  int const end = arrayOop(old)->length();
  if (end > (int) _min_array_size_for_chunking) {
    // we'll chunk more
    start = end - _array_chunk_size;
    assert(start > 0, "invariant");
    arrayOop(old)->set_length(start);
    push_depth(mask_chunked_array_oop(old));
    TASKQUEUE_STATS_ONLY(++_masked_pushes);
  } else {
    // this is the final chunk for this array
    start = 0;
    int const actual_length = arrayOop(obj)->length();
    arrayOop(old)->set_length(actual_length);
  }

  if (UseCompressedOops) {
    process_array_chunk_work<narrowOop>(obj, start, end);
  } else {
    process_array_chunk_work<oop>(obj, start, end);
  }
}

class PushContentsClosure : public ExtendedOopClosure {
  PSPromotionManager* _pm;
 public:
  PushContentsClosure(PSPromotionManager* pm) : _pm(pm) {}

  template <typename T> void do_oop_nv(T* p) {
    if (PSScavenge::should_scavenge(p)) {
      _pm->claim_or_forward_depth(p);
    }
  }

  virtual void do_oop(oop* p)       { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }

  // Don't use the oop verification code in the oop_oop_iterate framework.
  debug_only(virtual bool should_verify_oops() { return false; })
};

void InstanceKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
  PushContentsClosure cl(pm);
  oop_oop_iterate_oop_maps_reverse<true>(obj, &cl);
}

void InstanceMirrorKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
  // Note that we don't have to follow the mirror -> klass pointer, since all
  // klasses that are dirty will be scavenged when we iterate over the
  // ClassLoaderData objects.

  InstanceKlass::oop_ps_push_contents(obj, pm);

  PushContentsClosure cl(pm);
  oop_oop_iterate_statics<true>(obj, &cl);
}

void InstanceClassLoaderKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
  InstanceKlass::oop_ps_push_contents(obj, pm);

  // This is called by the young collector. It will already have taken care of
  // all class loader data. So, we don't have to follow the class loader ->
  // class loader data link.
}

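// java.lang.ref.Reference instances get special treatment below: when the
// referent is still scavengeable, the Reference is first offered to the
// ReferenceProcessor. If discovery succeeds, only the regular instance
// fields are pushed here and the Reference fields are handled later by
// reference processing; if it fails, the referent is claimed like any other
// oop. The discovered field is scanned only for inactive references
// (next != NULL), next is always scanned, and - in this code base - an
// Ephemeron's value field is scanned as well.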
template <class T>
static void oop_ps_push_contents_specialized(oop obj, InstanceRefKlass *klass, PSPromotionManager* pm) {
  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
  if (PSScavenge::should_scavenge(referent_addr)) {
    ReferenceProcessor* rp = PSScavenge::reference_processor();
    if (rp->discover_reference(obj, klass->reference_type())) {
      // reference successfully discovered; referent and next will be traversed later
      klass->InstanceKlass::oop_ps_push_contents(obj, pm);
      return;
    } else {
      // treat referent as normal oop
      pm->claim_or_forward_depth(referent_addr);
    }
  }
  // Treat discovered as normal oop, if ref is not "active",
  // i.e. if next is non-NULL.
  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
  T next_oop = oopDesc::load_heap_oop(next_addr);
  if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
    T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
    log_develop_trace(gc, ref)("   Process discovered as normal " PTR_FORMAT, p2i(discovered_addr));
    if (PSScavenge::should_scavenge(discovered_addr)) {
      pm->claim_or_forward_depth(discovered_addr);
    }
  }
  // Treat next as normal oop; next is a link in the reference queue.
  if (PSScavenge::should_scavenge(next_addr)) {
    pm->claim_or_forward_depth(next_addr);
  }
  // Treat value as normal oop if Ephemeron
  if (klass->reference_type() == REF_EPHEMERON) {
    T* value_addr = (T*)java_lang_ref_Ephemeron::value_addr(obj);
    if (PSScavenge::should_scavenge(value_addr)) {
      pm->claim_or_forward_depth(value_addr);
    }
  }
  klass->InstanceKlass::oop_ps_push_contents(obj, pm);
}

void InstanceRefKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
  if (UseCompressedOops) {
    oop_ps_push_contents_specialized<narrowOop>(obj, this, pm);
  } else {
    oop_ps_push_contents_specialized<oop>(obj, this, pm);
  }
}

void ObjArrayKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
  assert(obj->is_objArray(), "obj must be obj array");
  PushContentsClosure cl(pm);
  oop_oop_iterate_elements<true>(objArrayOop(obj), &cl);
}

void TypeArrayKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
  assert(obj->is_typeArray(), "must be a type array");
  ShouldNotReachHere();
}

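// Promotion failure protocol, used by the function below: the thread that
// failed to copy obj tries to CAS a forwarding pointer to obj itself into
// the header. Winning that race means this thread "owns" the failure: it
// records it, pushes obj's contents so they are still scanned, and has
// PSScavenge save the original mark so it can be restored when the failed
// scavenge is cleaned up. Losing means another thread installed a forwardee
// first, so we simply follow and return it.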
oop PSPromotionManager::oop_promotion_failed(oop obj, markOop obj_mark) {
  assert(_old_gen_is_full || PromotionFailureALot, "Sanity");

  // Attempt to CAS in the header.
  // This tests if the header is still the same as when
  // this started. If it is the same (i.e., no forwarding
  // pointer has been installed), then this thread owns
  // it.
  if (obj->cas_forward_to(obj, obj_mark)) {
    // We won any races, we "own" this object.
    assert(obj == obj->forwardee(), "Sanity");

    _promotion_failed_info.register_copy_failure(obj->size());

    push_contents(obj);

    // Save the mark if needed
    PSScavenge::oop_promotion_failed(obj, obj_mark);
  } else {
    // We lost, someone else "owns" this object.
    guarantee(obj->is_forwarded(), "Object must be forwarded if the cas failed.");

    // No unallocation to worry about.
    obj = obj->forwardee();
  }

  log_develop_trace(gc, scavenge)("{promotion-failure %s " PTR_FORMAT " (%d)}", obj->klass()->internal_name(), p2i(obj), obj->size());

  return obj;
}