/*
 * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/mutableSpace.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psScavenge.inline.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/memRegion.hpp"
#include "memory/padded.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/arrayOop.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/instanceClassLoaderKlass.inline.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/instanceMirrorKlass.inline.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"

PaddedEnd<PSPromotionManager>* PSPromotionManager::_manager_array = NULL;
OopStarTaskQueueSet*           PSPromotionManager::_stack_array_depth = NULL;
PreservedMarksSet*             PSPromotionManager::_preserved_marks_set = NULL;
PSOldGen*                      PSPromotionManager::_old_gen = NULL;
MutableSpace*                  PSPromotionManager::_young_space = NULL;

void PSPromotionManager::initialize() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  _old_gen = heap->old_gen();
  _young_space = heap->young_gen()->to_space();

  const uint promotion_manager_num = ParallelGCThreads + 1;

  // To prevent false sharing, we pad the PSPromotionManagers
  // and make sure that the first instance starts at a cache line.
  assert(_manager_array == NULL, "Attempt to initialize twice");
  _manager_array = PaddedArray<PSPromotionManager, mtGC>::create_unfreeable(promotion_manager_num);
  guarantee(_manager_array != NULL, "Could not initialize promotion manager");

  _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads);
  guarantee(_stack_array_depth != NULL, "Could not initialize promotion manager");

  // Create and register the PSPromotionManager(s) for the worker threads.
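  // Each worker's depth-first queue is registered with the shared queue set
  // so that idle workers can steal outstanding tasks from busy ones. A hedged
  // sketch of how a stealing worker might consume tasks (illustrative only;
  // the exact steal signature varies across JDK versions):
  //
  //   StarTask stolen;
  //   while (steal_depth(worker_id, /* seed, */ stolen)) {
  //     process_popped_location_depth(stolen);
  //   }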
  for (uint i = 0; i < ParallelGCThreads; i++) {
    stack_array_depth()->register_queue(i, _manager_array[i].claimed_stack_depth());
  }
  // The VMThread gets its own PSPromotionManager, which is not available
  // for work stealing.

  assert(_preserved_marks_set == NULL, "Attempt to initialize twice");
  _preserved_marks_set = new PreservedMarksSet(true /* in_c_heap */);
  guarantee(_preserved_marks_set != NULL, "Could not initialize preserved marks set");
  _preserved_marks_set->init(promotion_manager_num);
  for (uint i = 0; i < promotion_manager_num; i += 1) {
    _manager_array[i].register_preserved_marks(_preserved_marks_set->get(i));
  }
}

// Helper functions to get around the circular dependency between
// psScavenge.inline.hpp and psPromotionManager.inline.hpp.
bool PSPromotionManager::should_scavenge(oop* p, bool check_to_space) {
  return PSScavenge::should_scavenge(p, check_to_space);
}
bool PSPromotionManager::should_scavenge(narrowOop* p, bool check_to_space) {
  return PSScavenge::should_scavenge(p, check_to_space);
}

PSPromotionManager* PSPromotionManager::gc_thread_promotion_manager(uint index) {
  assert(index < ParallelGCThreads, "index out of range");
  assert(_manager_array != NULL, "Sanity");
  return &_manager_array[index];
}

PSPromotionManager* PSPromotionManager::vm_thread_promotion_manager() {
  assert(_manager_array != NULL, "Sanity");
  return &_manager_array[ParallelGCThreads];
}

void PSPromotionManager::pre_scavenge() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  _preserved_marks_set->assert_empty();
  _young_space = heap->young_gen()->to_space();

  for (uint i = 0; i < ParallelGCThreads + 1; i++) {
    manager_array(i)->reset();
  }
}
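
// post_scavenge() runs once after all scavenge workers have finished: it
// checks that every manager drained its stacks, reports per-manager
// promotion failures to the GC tracer, and flushes the promotion LABs.
// A hedged sketch of the expected calling pattern (illustrative only):
//
//   PSPromotionManager::pre_scavenge();
//   /* ... workers promote live young objects ... */
//   if (PSPromotionManager::post_scavenge(gc_tracer)) {
//     // Some object could not be promoted; undo the self-forwarding done in
//     // oop_promotion_failed() by restoring the preserved marks.
//     PSPromotionManager::restore_preserved_marks();
//   }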
bool PSPromotionManager::post_scavenge(YoungGCTracer& gc_tracer) {
  bool promotion_failure_occurred = false;

  TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
  for (uint i = 0; i < ParallelGCThreads + 1; i++) {
    PSPromotionManager* manager = manager_array(i);
    assert(manager->claimed_stack_depth()->is_empty(), "should be empty");
    if (manager->_promotion_failed_info.has_failed()) {
      gc_tracer.report_promotion_failed(manager->_promotion_failed_info);
      promotion_failure_occurred = true;
    }
    manager->flush_labs();
  }
  if (!promotion_failure_occurred) {
    // If there was no promotion failure, the preserved mark stacks
    // should be empty.
    _preserved_marks_set->assert_empty();
  }
  return promotion_failure_occurred;
}

#if TASKQUEUE_STATS
void
PSPromotionManager::print_local_stats(outputStream* const out, uint i) const {
  #define FMT " " SIZE_FORMAT_W(10)
  out->print_cr("%3u" FMT FMT FMT FMT, i, _masked_pushes, _masked_steals,
                _arrays_chunked, _array_chunks_processed);
  #undef FMT
}

static const char* const pm_stats_hdr[] = {
  "    --------masked-------     arrays      array",
  "thr       push      steal    chunked     chunks",
  "--- ---------- ---------- ---------- ----------"
};

void
PSPromotionManager::print_taskqueue_stats() {
  if (!log_is_enabled(Trace, gc, task, stats)) {
    return;
  }
  Log(gc, task, stats) log;
  ResourceMark rm;
  LogStream ls(log.trace());
  outputStream* out = &ls;
  out->print_cr("== GC Tasks Stats, GC %3d",
                ParallelScavengeHeap::heap()->total_collections());

  TaskQueueStats totals;
  out->print("thr "); TaskQueueStats::print_header(1, out); out->cr();
  out->print("--- "); TaskQueueStats::print_header(2, out); out->cr();
  for (uint i = 0; i < ParallelGCThreads + 1; ++i) {
    TaskQueueStats& next = manager_array(i)->_claimed_stack_depth.stats;
    out->print("%3d ", i); next.print(out); out->cr();
    totals += next;
  }
  out->print("tot "); totals.print(out); out->cr();

  const uint hlines = sizeof(pm_stats_hdr) / sizeof(pm_stats_hdr[0]);
  for (uint i = 0; i < hlines; ++i) out->print_cr("%s", pm_stats_hdr[i]);
  for (uint i = 0; i < ParallelGCThreads + 1; ++i) {
    manager_array(i)->print_local_stats(out, i);
  }
}

void
PSPromotionManager::reset_stats() {
  claimed_stack_depth()->stats.reset();
  _masked_pushes = _masked_steals = 0;
  _arrays_chunked = _array_chunks_processed = 0;
}
#endif // TASKQUEUE_STATS

PSPromotionManager::PSPromotionManager() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  // We set the old lab's start array.
  _old_lab.set_start_array(old_gen()->start_array());

  uint queue_size;
  claimed_stack_depth()->initialize();
  queue_size = claimed_stack_depth()->max_elems();

  _totally_drain = (ParallelGCThreads == 1) || (GCDrainStackTargetSize == 0);
  if (_totally_drain) {
    _target_stack_size = 0;
  } else {
    // don't let the target stack size be more than 1/4 of the entries
    _target_stack_size = (uint) MIN2((uint) GCDrainStackTargetSize,
                                     (uint) (queue_size / 4));
  }

  _array_chunk_size = ParGCArrayScanChunk;
  // let's choose 1.5x the chunk size
  _min_array_size_for_chunking = 3 * _array_chunk_size / 2;

  _preserved_marks = NULL;

  reset();
}
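
// Reset a manager between scavenges. Both promotion LABs are (re)initialized
// to zero-length regions at the current space tops, so no memory is reserved
// up front; the first allocation through a LAB forces a real refill. A hedged
// illustration of that empty-region idiom (names illustrative):
//
//   HeapWord* top = space->top();
//   lab.initialize(MemRegion(top, (size_t)0)); // empty: refill on first use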
void PSPromotionManager::reset() {
  assert(stacks_empty(), "reset of non-empty stack");

  // We need to get an assert in here to make sure the labs are always flushed.

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  // Do not prefill the LABs; that would only waste heap space.
  HeapWord* lab_base = young_space()->top();
  _young_lab.initialize(MemRegion(lab_base, (size_t)0));
  _young_gen_is_full = false;

  lab_base = old_gen()->object_space()->top();
  _old_lab.initialize(MemRegion(lab_base, (size_t)0));
  _old_gen_is_full = false;

  _promotion_failed_info.reset();

  TASKQUEUE_STATS_ONLY(reset_stats());
}

void PSPromotionManager::register_preserved_marks(PreservedMarks* preserved_marks) {
  assert(_preserved_marks == NULL, "do not set it twice");
  _preserved_marks = preserved_marks;
}

class ParRestoreGCTask : public GCTask {
private:
  const uint _id;
  PreservedMarksSet* const _preserved_marks_set;
  volatile size_t* const _total_size_addr;

public:
  virtual char* name() {
    return (char*) "preserved mark restoration task";
  }

  virtual void do_it(GCTaskManager* manager, uint which) {
    _preserved_marks_set->get(_id)->restore_and_increment(_total_size_addr);
  }

  ParRestoreGCTask(uint id,
                   PreservedMarksSet* preserved_marks_set,
                   volatile size_t* total_size_addr)
    : _id(id),
      _preserved_marks_set(preserved_marks_set),
      _total_size_addr(total_size_addr) { }
};

class PSRestorePreservedMarksTaskExecutor : public RestorePreservedMarksTaskExecutor {
private:
  GCTaskManager* _gc_task_manager;

public:
  PSRestorePreservedMarksTaskExecutor(GCTaskManager* gc_task_manager)
    : _gc_task_manager(gc_task_manager) { }

  void restore(PreservedMarksSet* preserved_marks_set,
               volatile size_t* total_size_addr) {
    // GCTask / GCTaskQueue are ResourceObjs
    ResourceMark rm;

    GCTaskQueue* q = GCTaskQueue::create();
    for (uint i = 0; i < preserved_marks_set->num(); i += 1) {
      q->enqueue(new ParRestoreGCTask(i, preserved_marks_set, total_size_addr));
    }
    _gc_task_manager->execute_and_wait(q);
  }
};

void PSPromotionManager::restore_preserved_marks() {
  PSRestorePreservedMarksTaskExecutor task_executor(PSScavenge::gc_task_manager());
  _preserved_marks_set->restore(&task_executor);
}

void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
  totally_drain = totally_drain || _totally_drain;

#ifdef ASSERT
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  MutableSpace* to_space = heap->young_gen()->to_space();
  MutableSpace* old_space = heap->old_gen()->object_space();
#endif /* ASSERT */

  OopStarTaskQueue* const tq = claimed_stack_depth();
  do {
    StarTask p;

    // Drain overflow stack first, so other threads can steal from
    // claimed stack while we work.
    while (tq->pop_overflow(p)) {
      process_popped_location_depth(p);
    }

    if (totally_drain) {
      while (tq->pop_local(p)) {
        process_popped_location_depth(p);
      }
    } else {
      while (tq->size() > _target_stack_size && tq->pop_local(p)) {
        process_popped_location_depth(p);
      }
    }
  } while ((totally_drain && !tq->taskqueue_empty()) || !tq->overflow_empty());

  assert(!totally_drain || tq->taskqueue_empty(), "Sanity");
  assert(totally_drain || tq->size() <= _target_stack_size, "Sanity");
  assert(tq->overflow_empty(), "Sanity");
}
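
// Flush both promotion LABs at the end of a scavenge. Flushing retires a
// LAB, filling its unused tail so the heap stays parseable. A LAB whose
// generation overflowed during the scavenge was already flushed and could
// not be refilled, hence the is_flushed() checks below.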
void PSPromotionManager::flush_labs() {
  assert(stacks_empty(), "Attempt to flush lab with live stack");

  // If either promotion lab fills up, we can flush the
  // lab but not refill it, so check first.
  assert(!_young_lab.is_flushed() || _young_gen_is_full, "Sanity");
  if (!_young_lab.is_flushed())
    _young_lab.flush();

  assert(!_old_lab.is_flushed() || _old_gen_is_full, "Sanity");
  if (!_old_lab.is_flushed())
    _old_lab.flush();

  // Let PSScavenge know if we overflowed
  if (_young_gen_is_full) {
    PSScavenge::set_survivor_overflow(true);
  }
}

template <class T> void PSPromotionManager::process_array_chunk_work(
                                                 oop obj,
                                                 int start, int end) {
  assert(start <= end, "invariant");
  T* const base      = (T*)objArrayOop(obj)->base();
  T* p               = base + start;
  T* const chunk_end = base + end;
  while (p < chunk_end) {
    if (PSScavenge::should_scavenge(p)) {
      claim_or_forward_depth(p);
    }
    ++p;
  }
}

void PSPromotionManager::process_array_chunk(oop old) {
  assert(PSChunkLargeArrays, "invariant");
  assert(old->is_objArray(), "invariant");
  assert(old->is_forwarded(), "invariant");

  TASKQUEUE_STATS_ONLY(++_array_chunks_processed);

  oop const obj = old->forwardee();

  int start;
  int const end = arrayOop(old)->length();
  if (end > (int) _min_array_size_for_chunking) {
    // we'll chunk more
    start = end - _array_chunk_size;
    assert(start > 0, "invariant");
    arrayOop(old)->set_length(start);
    push_depth(mask_chunked_array_oop(old));
    TASKQUEUE_STATS_ONLY(++_masked_pushes);
  } else {
    // this is the final chunk for this array
    start = 0;
    int const actual_length = arrayOop(obj)->length();
    arrayOop(old)->set_length(actual_length);
  }

  if (UseCompressedOops) {
    process_array_chunk_work<narrowOop>(obj, start, end);
  } else {
    process_array_chunk_work<oop>(obj, start, end);
  }
}

class PushContentsClosure : public BasicOopIterateClosure {
  PSPromotionManager* _pm;
 public:
  PushContentsClosure(PSPromotionManager* pm) : _pm(pm) {}

  template <typename T> void do_oop_work(T* p) {
    if (PSScavenge::should_scavenge(p)) {
      _pm->claim_or_forward_depth(p);
    }
  }

  virtual void do_oop(oop* p)       { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }

  // Don't use the oop verification code in the oop_oop_iterate framework.
  debug_only(virtual bool should_verify_oops() { return false; })
};

void InstanceKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
  PushContentsClosure cl(pm);
  if (UseCompressedOops) {
    oop_oop_iterate_oop_maps_reverse<narrowOop>(obj, &cl);
  } else {
    oop_oop_iterate_oop_maps_reverse<oop>(obj, &cl);
  }
}

void InstanceMirrorKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
  // Note that we don't have to follow the mirror -> klass pointer, since all
  // klasses that are dirty will be scavenged when we iterate over the
  // ClassLoaderData objects.

  InstanceKlass::oop_ps_push_contents(obj, pm);

  PushContentsClosure cl(pm);
  if (UseCompressedOops) {
    oop_oop_iterate_statics<narrowOop>(obj, &cl);
  } else {
    oop_oop_iterate_statics<oop>(obj, &cl);
  }
}

void InstanceClassLoaderKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
  InstanceKlass::oop_ps_push_contents(obj, pm);

  // This is called by the young collector. It will already have taken care of
  // all class loader data. So, we don't have to follow the class loader ->
  // class loader data link.
}
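
// java.lang.ref.Reference instances get special treatment: if the
// ReferenceProcessor discovers the reference, the referent field is
// deliberately left alone here and handled later during reference
// processing; otherwise the referent is pushed like any other field. The
// helper below is templatized on narrowOop vs. oop so one implementation
// serves both compressed and uncompressed oop modes, selected by
// UseCompressedOops in the caller.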
template <class T>
static void oop_ps_push_contents_specialized(oop obj, InstanceRefKlass *klass, PSPromotionManager* pm) {
  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr_raw(obj);
  if (PSScavenge::should_scavenge(referent_addr)) {
    ReferenceProcessor* rp = PSScavenge::reference_processor();
    if (rp->discover_reference(obj, klass->reference_type())) {
      // reference discovered, referent will be traversed later.
      klass->InstanceKlass::oop_ps_push_contents(obj, pm);
      return;
    } else {
      // treat referent as normal oop
      pm->claim_or_forward_depth(referent_addr);
    }
  }
  // Treat discovered as normal oop
  T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr_raw(obj);
  if (PSScavenge::should_scavenge(discovered_addr)) {
    pm->claim_or_forward_depth(discovered_addr);
  }
  klass->InstanceKlass::oop_ps_push_contents(obj, pm);
}

void InstanceRefKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
  if (UseCompressedOops) {
    oop_ps_push_contents_specialized<narrowOop>(obj, this, pm);
  } else {
    oop_ps_push_contents_specialized<oop>(obj, this, pm);
  }
}

void ObjArrayKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
  assert(obj->is_objArray(), "obj must be obj array");
  PushContentsClosure cl(pm);
  if (UseCompressedOops) {
    oop_oop_iterate_elements<narrowOop>(objArrayOop(obj), &cl);
  } else {
    oop_oop_iterate_elements<oop>(objArrayOop(obj), &cl);
  }
}

void TypeArrayKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
  assert(obj->is_typeArray(), "must be a type array");
  ShouldNotReachHere();
}

oop PSPromotionManager::oop_promotion_failed(oop obj, markOop obj_mark) {
  assert(_old_gen_is_full || PromotionFailureALot, "Sanity");

  // Attempt to CAS in the header.
  // This tests if the header is still the same as when
  // this started. If it is the same (i.e., no forwarding
  // pointer has been installed), then this thread owns
  // it.
  if (obj->cas_forward_to(obj, obj_mark)) {
    // We won any races, we "own" this object.
    assert(obj == obj->forwardee(), "Sanity");

    _promotion_failed_info.register_copy_failure(obj->size());

    push_contents(obj);

    _preserved_marks->push_if_necessary(obj, obj_mark);
  } else {
    // We lost, someone else "owns" this object.
    guarantee(obj->is_forwarded(), "Object must be forwarded if the cas failed.");

    // No unallocation to worry about.
    obj = obj->forwardee();
  }

  log_develop_trace(gc, scavenge)("{promotion-failure %s " PTR_FORMAT " (%d)}", obj->klass()->internal_name(), p2i(obj), obj->size());

  return obj;
}