/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
#include "gc_implementation/parNew/parNewGeneration.hpp"
#include "gc_implementation/parNew/parOopClosures.inline.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "gc_implementation/shared/ageTable.hpp"
#include "gc_implementation/shared/copyFailedInfo.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.inline.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/defNewGeneration.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/workgroup.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

ParScanThreadState::ParScanThreadState(Space* to_space_,
                                       ParNewGeneration* gen_,
                                       Generation* old_gen_,
                                       int thread_num_,
                                       ObjToScanQueueSet* work_queue_set_,
                                       Stack<oop, mtGC>* overflow_stacks_,
                                       size_t desired_plab_sz_,
                                       ParallelTaskTerminator& term_) :
  _to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_),
  _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
  _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
  _ageTable(false), // false ==> not the global age table, no perf data.
  _to_space_alloc_buffer(desired_plab_sz_),
  _is_alive_closure(gen_),
  _keep_alive_closure(&_scan_weak_ref_closure),
  _strong_roots_time(0.0), _term_time(0.0)
{
  _to_space_closure = ParScanWithoutBarrierClosure(gen_, this);
  _old_gen_closure = ParScanWithBarrierClosure(gen_, this);
  _to_space_root_closure = ParRootScanWithoutBarrierClosure(gen_, this);
  _old_gen_root_closure = ParRootScanWithBarrierTwoGensClosure(gen_, this);
  _older_gen_closure = ParRootScanWithBarrierTwoGensClosure(gen_, this);
  _scan_weak_ref_closure = ParScanWeakRefClosure(gen_, this);

  _evacuate_followers = ParEvacuateFollowersClosure(this,
                                                    &_to_space_closure,
                                                    &_old_gen_closure,
                                                    &_to_space_root_closure,
                                                    gen_,
                                                    &_old_gen_root_closure,
                                                    work_queue_set_,
                                                    &term_);

#if TASKQUEUE_STATS
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
#endif // TASKQUEUE_STATS

  _survivor_chunk_array =
    (ChunkArray*) old_gen()->get_data_recorder(thread_num());
  _hash_seed = 17;  // Might want to take time-based random value.
  _start = os::elapsedTime();
  _old_gen_closure.set_generation(old_gen_);
  _old_gen_root_closure.set_generation(old_gen_);
}

void ParScanThreadState::record_survivor_plab(HeapWord* plab_start,
                                              size_t plab_word_size) {
  ChunkArray* sca = survivor_chunk_array();
  if (sca != NULL) {
    // A non-null SCA implies that we want the PLAB data recorded.
    sca->record_sample(plab_start, plab_word_size);
  }
}

bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) const {
  return new_obj->is_objArray() &&
         arrayOop(new_obj)->length() > ParGCArrayScanChunk &&
         new_obj != old_obj;
}

void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
  assert(old->is_objArray(), "must be obj array");
  assert(old->is_forwarded(), "must be forwarded");
  assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
  assert(!old_gen()->is_in(old), "must be in young generation.");

  objArrayOop obj = objArrayOop(old->forwardee());
  // Process ParGCArrayScanChunk elements now
  // and push the remainder back onto queue
  int start = arrayOop(old)->length();
  int end = obj->length();
  int remainder = end - start;
  assert(start <= end, "just checking");
  if (remainder > 2 * ParGCArrayScanChunk) {
    // Test above combines last partial chunk with a full chunk
    end = start + ParGCArrayScanChunk;
    arrayOop(old)->set_length(end);
    // Push remainder.
    bool ok = work_queue()->push(old);
    assert(ok, "just popped, push must be okay");
  } else {
    // Restore length so that it can be used if there
    // is a promotion failure and forwarding pointers
    // must be removed.
    arrayOop(old)->set_length(end);
  }

  // process our set of indices (include header in first chunk)
  // should make sure end is even (aligned to HeapWord in case of compressed oops)
  if ((HeapWord *)obj < young_old_boundary()) {
    // object is in to_space
    obj->oop_iterate_range(&_to_space_closure, start, end);
  } else {
    // object is in old generation
    obj->oop_iterate_range(&_old_gen_closure, start, end);
  }
}


// Pop and scan objects from the work queue until at most max_size entries
// remain, draining the thread-local overflow stack along the way.
void ParScanThreadState::trim_queues(int max_size) {
  ObjToScanQueue* queue = work_queue();
  do {
    while (queue->size() > (juint)max_size) {
      oop obj_to_scan;
      if (queue->pop_local(obj_to_scan)) {
        if ((HeapWord *)obj_to_scan < young_old_boundary()) {
          if (obj_to_scan->is_objArray() &&
              obj_to_scan->is_forwarded() &&
              obj_to_scan->forwardee() != obj_to_scan) {
            scan_partial_array_and_push_remainder(obj_to_scan);
          } else {
            // object is in to_space
            obj_to_scan->oop_iterate(&_to_space_closure);
          }
        } else {
          // object is in old generation
          obj_to_scan->oop_iterate(&_old_gen_closure);
        }
      }
    }
    // For the case of compressed oops, we have a private, non-shared
    // overflow stack, so we eagerly drain it so as to more evenly
    // distribute load early. Note: this may be good to do in
    // general rather than delay for the final stealing phase.
    // If applicable, we'll transfer a set of objects over to our
    // work queue, allowing them to be stolen and draining our
    // private overflow stack.
  } while (ParGCTrimOverflow && young_gen()->take_from_overflow_list(this));
}

bool ParScanThreadState::take_from_overflow_stack() {
  assert(ParGCUseLocalOverflow, "Else should not call");
  assert(young_gen()->overflow_list() == NULL, "Error");
  ObjToScanQueue* queue = work_queue();
  Stack<oop, mtGC>* const of_stack = overflow_stack();
  const size_t num_overflow_elems = of_stack->size();
  const size_t space_available = queue->max_elems() - queue->size();
  const size_t num_take_elems = MIN3(space_available / 4,
                                     ParGCDesiredObjsFromOverflowList,
                                     num_overflow_elems);
  // Transfer the most recent num_take_elems from the overflow
  // stack to our work queue.
  for (size_t i = 0; i != num_take_elems; i++) {
    oop cur = of_stack->pop();
    oop obj_to_push = cur->forwardee();
    assert(Universe::heap()->is_in_reserved(cur), "Should be in heap");
    assert(!old_gen()->is_in_reserved(cur), "Should be in young gen");
    assert(Universe::heap()->is_in_reserved(obj_to_push), "Should be in heap");
    if (should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = queue->push(obj_to_push);
    assert(ok, "Should have succeeded");
  }
  assert(young_gen()->overflow_list() == NULL, "Error");
  return num_take_elems > 0; // was something transferred?
}

void ParScanThreadState::push_on_overflow_stack(oop p) {
  assert(ParGCUseLocalOverflow, "Else should not call");
  overflow_stack()->push(p);
  assert(young_gen()->overflow_list() == NULL, "Error");
}

HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {

  // Otherwise, if the object is small enough, try to reallocate the
  // buffer.
  HeapWord* obj = NULL;
  if (!_to_space_full) {
    ParGCAllocBuffer* const plab = to_space_alloc_buffer();
    Space* const sp = to_space();
    if (word_sz * 100 <
        ParallelGCBufferWastePct * plab->word_sz()) {
      // Is small enough; abandon this buffer and start a new one.
      plab->retire(false, false);
      size_t buf_size = plab->word_sz();
      HeapWord* buf_space = sp->par_allocate(buf_size);
      if (buf_space == NULL) {
        const size_t min_bytes =
          ParGCAllocBuffer::min_size() << LogHeapWordSize;
        size_t free_bytes = sp->free();
        while(buf_space == NULL && free_bytes >= min_bytes) {
          buf_size = free_bytes >> LogHeapWordSize;
          assert(buf_size == (size_t)align_object_size(buf_size),
                 "Invariant");
          buf_space = sp->par_allocate(buf_size);
          free_bytes = sp->free();
        }
      }
      if (buf_space != NULL) {
        plab->set_word_size(buf_size);
        plab->set_buf(buf_space);
        record_survivor_plab(buf_space, buf_size);
        obj = plab->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
        // Note that we cannot compare buf_size < word_sz below
        // because of AlignmentReserve (see ParGCAllocBuffer::allocate()).
        assert(obj != NULL || plab->words_remaining() < word_sz,
               "Else should have been able to allocate");
        // It's conceivable that we may be able to use the
        // buffer we just grabbed for subsequent small requests
        // even if not for this one.
      } else {
        // We're used up.
        _to_space_full = true;
      }

    } else {
      // Too large; allocate the object individually.
      obj = sp->par_allocate(word_sz);
    }
  }
  return obj;
}


void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj,
                                                size_t word_sz) {
  // Is the alloc in the current alloc buffer?
  if (to_space_alloc_buffer()->contains(obj)) {
    assert(to_space_alloc_buffer()->contains(obj + word_sz - 1),
           "Should contain whole object.");
    to_space_alloc_buffer()->undo_allocation(obj, word_sz);
  } else {
    CollectedHeap::fill_with_object(obj, word_sz);
  }
}

void ParScanThreadState::print_promotion_failure_size() {
  if (_promotion_failed_info.has_failed() && PrintPromotionFailure) {
    gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ",
                        _thread_num, _promotion_failed_info.first_size());
  }
}

class ParScanThreadStateSet: private ResourceArray {
public:
  // Initializes states for the specified number of threads;
  ParScanThreadStateSet(int num_threads,
                        Space& to_space,
                        ParNewGeneration& gen,
                        Generation& old_gen,
                        ObjToScanQueueSet& queue_set,
                        Stack<oop, mtGC>* overflow_stacks_,
                        size_t desired_plab_sz,
                        ParallelTaskTerminator& term);

  ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }

  inline ParScanThreadState& thread_state(int i);

  void trace_promotion_failed(YoungGCTracer& gc_tracer);
  void reset(int active_workers, bool promotion_failed);
  void flush();

#if TASKQUEUE_STATS
  static void
    print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_termination_stats(outputStream* const st = gclog_or_tty);
  static void
    print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_taskqueue_stats(outputStream* const st = gclog_or_tty);
  void reset_stats();
#endif // TASKQUEUE_STATS

private:
  ParallelTaskTerminator& _term;
  ParNewGeneration&       _gen;
  Generation&             _next_gen;
public:
  bool is_valid(int id) const
    { return id < length(); }
  ParallelTaskTerminator* terminator() { return &_term; }
};


ParScanThreadStateSet::ParScanThreadStateSet(
  int num_threads, Space& to_space, ParNewGeneration& gen,
  Generation& old_gen, ObjToScanQueueSet& queue_set,
  Stack<oop, mtGC>* overflow_stacks,
  size_t desired_plab_sz, ParallelTaskTerminator& term)
  : ResourceArray(sizeof(ParScanThreadState), num_threads),
    _gen(gen), _next_gen(old_gen), _term(term)
{
  assert(num_threads > 0, "sanity check!");
  assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
         "overflow_stack allocation mismatch");
  // Initialize states.
  for (int i = 0; i < num_threads; ++i) {
    new ((ParScanThreadState*)_data + i)
        ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
                           overflow_stacks, desired_plab_sz, term);
  }
}

inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
{
  assert(i >= 0 && i < length(), "sanity check!");
  return ((ParScanThreadState*)_data)[i];
}

void ParScanThreadStateSet::trace_promotion_failed(YoungGCTracer& gc_tracer) {
  for (int i = 0; i < length(); ++i) {
    if (thread_state(i).promotion_failed()) {
      gc_tracer.report_promotion_failed(thread_state(i).promotion_failed_info());
      thread_state(i).promotion_failed_info().reset();
    }
  }
}

void ParScanThreadStateSet::reset(int active_threads, bool promotion_failed)
{
  _term.reset_for_reuse(active_threads);
  if (promotion_failed) {
    for (int i = 0; i < length(); ++i) {
      thread_state(i).print_promotion_failure_size();
    }
  }
}

#if TASKQUEUE_STATS
void
ParScanThreadState::reset_stats()
{
  taskqueue_stats().reset();
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
}

void ParScanThreadStateSet::reset_stats()
{
  for (int i = 0; i < length(); ++i) {
    thread_state(i).reset_stats();
  }
}

void
ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st)
{
  st->print_raw_cr("GC Termination Stats");
  st->print_raw_cr("     elapsed  --strong roots-- "
                   "-------termination-------");
  st->print_raw_cr("thr     ms        ms       % "
                   "    ms       %   attempts");
  st->print_raw_cr("--- --------- --------- ------ "
                   "--------- ------ --------");
}

void ParScanThreadStateSet::print_termination_stats(outputStream* const st)
{
  print_termination_stats_hdr(st);

  for (int i = 0; i < length(); ++i) {
    const ParScanThreadState & pss = thread_state(i);
    const double elapsed_ms = pss.elapsed_time() * 1000.0;
    const double s_roots_ms = pss.strong_roots_time() * 1000.0;
    const double term_ms = pss.term_time() * 1000.0;
    st->print_cr("%3d %9.2f %9.2f %6.2f "
                 "%9.2f %6.2f " SIZE_FORMAT_W(8),
                 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
                 term_ms, term_ms * 100 / elapsed_ms, pss.term_attempts());
  }
}

// Print stats related to work queue activity.
void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st)
{
  st->print_raw_cr("GC Task Stats");
  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
}

void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st)
{
  print_taskqueue_stats_hdr(st);

  TaskQueueStats totals;
  for (int i = 0; i < length(); ++i) {
    const ParScanThreadState & pss = thread_state(i);
    const TaskQueueStats & stats = pss.taskqueue_stats();
    st->print("%3d ", i); stats.print(st); st->cr();
    totals += stats;

    if (pss.overflow_refills() > 0) {
      st->print_cr("    " SIZE_FORMAT_W(10) " overflow refills    "
                   SIZE_FORMAT_W(10) " overflow objects",
                   pss.overflow_refills(), pss.overflow_refill_objs());
    }
  }
  st->print("tot "); totals.print(st); st->cr();

  DEBUG_ONLY(totals.verify());
}
#endif // TASKQUEUE_STATS

void ParScanThreadStateSet::flush()
{
  // Work in this loop should be kept as lightweight as
  // possible since this might otherwise become a bottleneck
  // to scaling. Should we add heavy-weight work into this
  // loop, consider parallelizing the loop into the worker threads.
  for (int i = 0; i < length(); ++i) {
    ParScanThreadState& par_scan_state = thread_state(i);

    // Flush stats related to To-space PLAB activity and
    // retire the last buffer.
    par_scan_state.to_space_alloc_buffer()->
      flush_stats_and_retire(_gen.plab_stats(),
                             true /* end_of_gc */,
                             false /* retain */);

    // Every thread has its own age table. We need to merge
    // them all into one.
    ageTable *local_table = par_scan_state.age_table();
    _gen.age_table()->merge(local_table);

    // Inform old gen that we're done.
    _next_gen.par_promote_alloc_done(i);
    _next_gen.par_oop_since_save_marks_iterate_done(i);
  }

  if (UseConcMarkSweepGC) {
    // We need to call this even when ResizeOldPLAB is disabled
    // so as to avoid breaking some asserts. While we may be able
    // to avoid this by reorganizing the code a bit, I am loathe
    // to do that unless we find cases where ergo leads to bad
    // performance.
    CFLS_LAB::compute_desired_plab_size();
  }
}

ParScanClosure::ParScanClosure(ParNewGeneration* g,
                               ParScanThreadState* par_scan_state) :
  OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ParScanWithBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, false); }
void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }

void ParScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, false); }
void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }

void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, true); }
void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }

void ParRootScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, true); }
void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }

ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
                                             ParScanThreadState* par_scan_state)
  : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
{}

void ParScanWeakRefClosure::do_oop(oop* p)       { ParScanWeakRefClosure::do_oop_work(p); }
void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }

ParEvacuateFollowersClosure::ParEvacuateFollowersClosure(
    ParScanThreadState* par_scan_state_,
    ParScanWithoutBarrierClosure* to_space_closure_,
    ParScanWithBarrierClosure* old_gen_closure_,
    ParRootScanWithoutBarrierClosure* to_space_root_closure_,
    ParNewGeneration* par_gen_,
    ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_,
    ObjToScanQueueSet* task_queues_,
    ParallelTaskTerminator* terminator_) :

    _par_scan_state(par_scan_state_),
    _to_space_closure(to_space_closure_),
    _old_gen_closure(old_gen_closure_),
    _to_space_root_closure(to_space_root_closure_),
    _old_gen_root_closure(old_gen_root_closure_),
    _par_gen(par_gen_),
    _task_queues(task_queues_),
    _terminator(terminator_)
{}

void ParEvacuateFollowersClosure::do_void() {
  ObjToScanQueue* work_q = par_scan_state()->work_queue();

  while (true) {

    // Scan to-space and old-gen objs until we run out of both.
    oop obj_to_scan;
    par_scan_state()->trim_queues(0);

    // We have no local work, attempt to steal from other threads.

    // attempt to steal work from promoted.
    if (task_queues()->steal(par_scan_state()->thread_num(),
                             par_scan_state()->hash_seed(),
                             obj_to_scan)) {
      bool res = work_q->push(obj_to_scan);
      assert(res, "Empty queue should have room for a push.");

      // if successful, goto Start.
      continue;

      // try global overflow list.
    } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
      continue;
    }

    // Otherwise, offer termination.
    par_scan_state()->start_term_time();
    if (terminator()->offer_termination()) break;
    par_scan_state()->end_term_time();
  }
  assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
         "Broken overflow list?");
  // Finish the last termination pause.
  par_scan_state()->end_term_time();
}

ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* next_gen,
                             HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) :
    AbstractGangTask("ParNewGeneration collection"),
    _gen(gen), _next_gen(next_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
  {}

// Reset the terminator for the given number of
// active threads.
void ParNewGenTask::set_for_termination(int active_workers) {
  _state_set->reset(active_workers, _gen->promotion_failed());
  // Should the heap be passed in? There's only 1 for now so
  // grab it instead.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->set_n_termination(active_workers);
}

void ParNewGenTask::work(uint worker_id) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  // Since this is being done in a separate thread, need new resource
  // and handle marks.
  ResourceMark rm;
  HandleMark hm;
  // We would need multiple old-gen queues otherwise.
  assert(gch->n_gens() == 2, "Par young collection currently only works with one older gen.");

  Generation* old_gen = gch->next_gen(_gen);

  ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
  assert(_state_set->is_valid(worker_id), "Should not have been called");

  par_scan_state.set_young_old_boundary(_young_old_boundary);

  KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),
                                      gch->rem_set()->klass_rem_set());
  CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
                                           &par_scan_state.to_space_root_closure(),
                                           false);

  par_scan_state.start_strong_roots();
  gch->gen_process_roots(_gen->level(),
                         true,  // Process younger gens, if any,
                                // as strong roots.
                         false, // no scope; this is parallel code
                         SharedHeap::SO_ScavengeCodeCache,
                         GenCollectedHeap::StrongAndWeakRoots,
                         &par_scan_state.to_space_root_closure(),
                         &par_scan_state.older_gen_closure(),
                         &cld_scan_closure);

  par_scan_state.end_strong_roots();

  // "evacuate followers".
  par_scan_state.evacuate_followers_closure().do_void();
}

ParNewGeneration::
ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
  : DefNewGeneration(rs, initial_byte_size, level, "PCopy"),
  _overflow_list(NULL),
  _plab_stats(YoungPLABSize, PLABWeight)
{
  _is_alive_closure = DefNewGeneration::IsAliveClosure(this);
  NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
  NOT_PRODUCT(_num_par_pushes = 0;)
  _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
  guarantee(_task_queues != NULL, "task_queues allocation failure.");

  for (uint i1 = 0; i1 < ParallelGCThreads; i1++) {
    ObjToScanQueue *q = new ObjToScanQueue();
    guarantee(q != NULL, "work_queue Allocation failure.");
    _task_queues->register_queue(i1, q);
  }

  for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
    _task_queues->queue(i2)->initialize();

  _overflow_stacks = NULL;
  if (ParGCUseLocalOverflow) {

    // typedef to workaround NEW_C_HEAP_ARRAY macro, which can not deal
    // with ','
    typedef Stack<oop, mtGC> GCOopStack;

    _overflow_stacks = NEW_C_HEAP_ARRAY(GCOopStack, ParallelGCThreads, mtGC);
    for (size_t i = 0; i < ParallelGCThreads; ++i) {
      new (_overflow_stacks + i) Stack<oop, mtGC>();
    }
  }

  if (UsePerfData) {
    EXCEPTION_MARK;
    ResourceMark rm;

    const char* cname =
      PerfDataManager::counter_name(_gen_counters->name_space(), "threads");
    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None,
                                     ParallelGCThreads, CHECK);
  }
}

// ParNewGeneration::
ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}

template <class T>
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _par_cl->do_oop_nv(p);

  if (Universe::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(oop* p)       { ParKeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(narrowOop* p) { ParKeepAliveClosure::do_oop_work(p); }

// ParNewGeneration::
KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {}

template <class T>
void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _cl->do_oop_nv(p);

  if (Universe::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/KeepAliveClosure::do_oop(oop* p)       { KeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); }

template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if ((HeapWord*)obj < _boundary) {
      assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
      oop new_obj = obj->is_forwarded()
                      ? obj->forwardee()
                      : _g->DefNewGeneration::copy_to_survivor_space(obj);
      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
    }
    if (_gc_barrier) {
      // If p points to a younger generation, mark the card.
      if ((HeapWord*)obj < _gen_boundary) {
        _rs->write_ref_field_gc_par(p, obj);
      }
    }
  }
}

void ScanClosureWithParBarrier::do_oop(oop* p)       { ScanClosureWithParBarrier::do_oop_work(p); }
void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }

class ParNewRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
public:
  ParNewRefProcTaskProxy(ProcessTask& task, ParNewGeneration& gen,
                         Generation& next_gen,
                         HeapWord* young_old_boundary,
                         ParScanThreadStateSet& state_set);

private:
  virtual void work(uint worker_id);
  virtual void set_for_termination(int active_workers) {
    _state_set.terminator()->reset_for_reuse(active_workers);
  }
private:
  ParNewGeneration&      _gen;
  ProcessTask&           _task;
  Generation&            _next_gen;
  HeapWord*              _young_old_boundary;
  ParScanThreadStateSet& _state_set;
};

ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(
    ProcessTask& task, ParNewGeneration& gen,
    Generation& next_gen,
    HeapWord* young_old_boundary,
    ParScanThreadStateSet& state_set)
  : AbstractGangTask("ParNewGeneration parallel reference processing"),
    _gen(gen),
    _task(task),
    _next_gen(next_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
{
}

void ParNewRefProcTaskProxy::work(uint worker_id)
{
  ResourceMark rm;
  HandleMark hm;
  ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id);
  par_scan_state.set_young_old_boundary(_young_old_boundary);
  _task.work(worker_id, par_scan_state.is_alive_closure(),
             par_scan_state.keep_alive_closure(),
             par_scan_state.evacuate_followers_closure());
}

class ParNewRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _task;

public:
  ParNewRefEnqueueTaskProxy(EnqueueTask& task)
    : AbstractGangTask("ParNewGeneration parallel reference enqueue"),
      _task(task)
  { }

  virtual void work(uint worker_id)
  {
    _task.work(worker_id);
  }
};


void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "not a generational heap");
  FlexibleWorkGang* workers =
      gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  _state_set.reset(workers->active_workers(), _generation.promotion_failed());
  ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(),
                                 _generation.reserved().end(), _state_set);
  workers->run_task(&rp_task);
  _state_set.reset(0 /* bad value in debug if not reset */,
                   _generation.promotion_failed());
}

void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  ParNewRefEnqueueTaskProxy enq_task(task);
  workers->run_task(&enq_task);
}

void ParNewRefProcTaskExecutor::set_single_threaded_mode()
{
  _state_set.flush();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->set_par_threads(0);  // 0 ==> non-parallel.
  gch->save_marks();
}

ScanClosureWithParBarrier::
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
  ScanClosure(g, gc_barrier) {}

EvacuateFollowersClosureGeneral::
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
                                OopsInGenClosure* cur,
                                OopsInGenClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void EvacuateFollowersClosureGeneral::do_void() {
  do {
    // Beware: this call will lead to closure applications via virtual
    // calls.
    _gch->oop_since_save_marks_iterate(_level,
                                       _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}


// A Generation that does parallel young-gen collection.

void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set, ParNewTracer& gc_tracer) {
  assert(_promo_failure_scan_stack.is_empty(), "post condition");
  _promo_failure_scan_stack.clear(true); // Clear cached segments.

  remove_forwarding_pointers();
  if (PrintGCDetails) {
    gclog_or_tty->print(" (promotion failed)");
  }
  // All the spaces are in play for mark-sweep.
  swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
  from()->set_next_compaction_space(to());
  gch->set_incremental_collection_failed();
  // Inform the next generation that a promotion failure occurred.
  _next_gen->promotion_failure_occurred();

  // Trace promotion failure in the parallel GC threads
  thread_state_set.trace_promotion_failed(gc_tracer);
  // Single threaded code may have reported promotion failure to the global state
  if (_promotion_failed_info.has_failed()) {
    gc_tracer.report_promotion_failed(_promotion_failed_info);
  }
  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
}

void ParNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start();

  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "not a CMS generational heap");
  AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need workgang for parallel work");
  int active_workers =
      AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
                                              workers->active_workers(),
                                              Threads::number_of_non_daemon_threads());
  workers->set_active_workers(active_workers);
  assert(gch->n_gens() == 2,
         "Par collection currently only works with single older gen.");
  _next_gen = gch->next_gen(this);

  // If the next generation is too full to accommodate worst-case promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  ParNewTracer gc_tracer;
  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
  gch->trace_heap_before_gc(&gc_tracer);

  init_assuming_no_promotion_failure();

  if (UseAdaptiveSizePolicy) {
    set_survivor_overflow(false);
    size_policy->minor_collection_begin();
  }

  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, gc_tracer.gc_id());
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  SpecializationStats::clear();

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->save_marks();
  assert(workers != NULL, "Need parallel worker threads.");
  int n_workers = active_workers;

  // Set the correct parallelism (number of queues) in the reference processor
  ref_processor()->set_active_mt_degree(n_workers);

  // Always set the terminator for the active number of workers
  // because only those workers go through the termination protocol.
  ParallelTaskTerminator _term(n_workers, task_queues());
  ParScanThreadStateSet thread_state_set(workers->active_workers(),
                                         *to(), *this, *_next_gen, *task_queues(),
                                         _overflow_stacks, desired_plab_sz(), _term);

  ParNewGenTask tsk(this, _next_gen, reserved().end(), &thread_state_set);
  gch->set_par_threads(n_workers);
  gch->rem_set()->prepare_for_younger_refs_iterate(true);
  // It turns out that even when we're using 1 thread, doing the work in a
  // separate thread causes wide variance in run times.  We can't help this
  // in the multi-threaded case, but we special-case n=1 here to get
  // repeatable measurements of the 1-thread overhead of the parallel code.
  if (n_workers > 1) {
    GenCollectedHeap::StrongRootsScope srs(gch);
    workers->run_task(&tsk);
  } else {
    GenCollectedHeap::StrongRootsScope srs(gch);
    tsk.work(0);
  }
  thread_state_set.reset(0 /* Bad value in debug if not reset */,
                         promotion_failed());

  // Trace and reset failed promotion info.
  if (promotion_failed()) {
    thread_state_set.trace_promotion_failed(gc_tracer);
  }

  // Process (weak) reference objects found during scavenge.
  ReferenceProcessor* rp = ref_processor();
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);
  KeepAliveClosure keep_alive(&scan_weak_ref);
  ScanClosure               scan_without_gc_barrier(this, false);
  ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
  set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
  EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
    &scan_without_gc_barrier, &scan_with_gc_barrier);
  rp->setup_policy(clear_all_soft_refs);
  // Can the mt_degree be set later (at run_task() time would be best)?
  rp->set_active_mt_degree(active_workers);
  ReferenceProcessorStats stats;
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, &task_executor,
                                              _gc_timer, gc_tracer.gc_id());
  } else {
    thread_state_set.flush();
    gch->set_par_threads(0);  // 0 ==> non-parallel.
    gch->save_marks();
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, NULL,
                                              _gc_timer, gc_tracer.gc_id());
  }
  gc_tracer.report_gc_reference_stats(stats);
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a minor collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    size_policy->reset_gc_overhead_limit_count();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();
  } else {
    handle_promotion_failed(gch, thread_state_set, gc_tracer);
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());

  if (ResizePLAB) {
    plab_stats()->adjust_desired_plab_sz(n_workers);
  }

  if (PrintGC && !PrintGCDetails) {
    gch->print_heap_change(gch_prev_used);
  }

  TASKQUEUE_STATS_ONLY(if (PrintTerminationStats) thread_state_set.print_termination_stats());
  TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) thread_state_set.print_taskqueue_stats());

  if (UseAdaptiveSizePolicy) {
    size_policy->minor_collection_end(gch->gc_cause());
    size_policy->avg_survived()->sample(from()->used());
  }

  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings and os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  SpecializationStats::print();

  rp->set_enqueuing_is_done(true);
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    rp->enqueue_discovered_references(&task_executor);
  } else {
    rp->enqueue_discovered_references(NULL);
  }
  rp->verify_no_references_recorded();

  gch->trace_heap_after_gc(&gc_tracer);
  gc_tracer.report_tenuring_threshold(tenuring_threshold());

  _gc_timer->register_gc_end();

  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}

static int sum;
// Burn a few cycles; used by real_forwardee_slow() between spin-reads
// while waiting for a claimed forwarding pointer to be installed.
void ParNewGeneration::waste_some_time() {
  for (int i = 0; i < 100; i++) {
    sum += i;
  }
}

static const oop ClaimedForwardPtr = cast_to_oop<intptr_t>(0x4);

// Because of concurrency, there are times where an object for which
// "is_forwarded()" is true contains an "interim" forwarding pointer
// value.  Such a value will soon be overwritten with a real value.
// This method requires "obj" to have a forwarding pointer, and waits, if
// necessary for a real one to be inserted, and returns it.

oop ParNewGeneration::real_forwardee(oop obj) {
  oop forward_ptr = obj->forwardee();
  if (forward_ptr != ClaimedForwardPtr) {
    return forward_ptr;
  } else {
    return real_forwardee_slow(obj);
  }
}

oop ParNewGeneration::real_forwardee_slow(oop obj) {
  // Spin-read if it is claimed but not yet written by another thread.
  oop forward_ptr = obj->forwardee();
  while (forward_ptr == ClaimedForwardPtr) {
    waste_some_time();
    assert(obj->is_forwarded(), "precondition");
    forward_ptr = obj->forwardee();
  }
  return forward_ptr;
}

#ifdef ASSERT
bool ParNewGeneration::is_legal_forward_ptr(oop p) {
  return
    (p == ClaimedForwardPtr)
    || Universe::heap()->is_in_reserved(p);
}
#endif

void ParNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    // We should really have separate per-worker stacks, rather
    // than use locking of a common pair of stacks.
    MutexLocker ml(ParGCRareEvent_lock);
    preserve_mark(obj, m);
  }
}

// Multiple GC threads may try to promote an object.  If the object
// is successfully promoted, a forwarding pointer will be installed in
// the object in the young generation.  This method claims the right
// to install the forwarding pointer before it copies the object,
// thus avoiding the need to undo the copy as in
// copy_to_survivor_space_avoiding_with_undo.

oop ParNewGeneration::copy_to_survivor_space(
        ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
  // In the sequential version, this assert also says that the object is
  // not forwarded.  That might not be the case here.  It is the case that
  // the caller observed it to be not forwarded at some time in the past.
  assert(is_in_reserved(old), "shouldn't be scavenging this oop");

  // The sequential code read "old->age()" below.  That doesn't work here,
  // since the age is in the mark word, and that might be overwritten with
  // a forwarding pointer by a parallel thread.  So we must save the mark
  // word in a local and then analyze it.
  oopDesc dummyOld;
  dummyOld.set_mark(m);
  assert(!dummyOld.is_forwarded(),
         "should not be called with forwarding pointer mark word.");

  oop new_obj = NULL;
  oop forward_ptr;

  // Try allocating obj in to-space (unless too old)
  if (dummyOld.age() < tenuring_threshold()) {
    new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
    if (new_obj == NULL) {
      set_survivor_overflow(true);
    }
  }

  if (new_obj == NULL) {
    // Either to-space is full or we decided to promote
    // try allocating obj tenured

    // Attempt to install a null forwarding pointer (atomically),
    // to claim the right to install the real forwarding pointer.
    forward_ptr = old->forward_to_atomic(ClaimedForwardPtr);
    if (forward_ptr != NULL) {
      // someone else beat us to it.
      return real_forwardee(old);
    }

    new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
                                     old, m, sz);

    if (new_obj == NULL) {
      // promotion failed, forward to self
      _promotion_failed = true;
      new_obj = old;

      preserve_mark_if_necessary(old, m);
      par_scan_state->register_promotion_failure(sz);
    }

    old->forward_to(new_obj);
    forward_ptr = NULL;
  } else {
    // Is in to-space; do copying ourselves.
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
    forward_ptr = old->forward_to_atomic(new_obj);
    // Restore the mark word copied above.
    new_obj->set_mark(m);
    // Increment age if obj still in new generation
    new_obj->incr_age();
    par_scan_state->age_table()->add(new_obj, sz);
  }
  assert(new_obj != NULL, "just checking");

#ifndef PRODUCT
  // This code must come after the CAS test, or it will print incorrect
  // information.
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
       is_in_reserved(new_obj) ? "copying" : "tenuring",
       new_obj->klass()->internal_name(), (void *)old, (void *)new_obj, new_obj->size());
  }
#endif

  if (forward_ptr == NULL) {
    oop obj_to_push = new_obj;
    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
      // Length field used as index of next element to be scanned.
      // Real length can be obtained from real_forwardee()
      arrayOop(old)->set_length(0);
      obj_to_push = old;
      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
             "push forwarded object");
    }
    // Push it on one of the queues of to-be-scanned objects.
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
      // Add stats for overflow pushes.
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("queue overflow!\n");
      }
      push_on_overflow_list(old, par_scan_state);
      TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
    }

    return new_obj;
  }

  // Oops.  Someone beat us to it.  Undo the allocation.  Where did we
  // allocate it?
  if (is_in_reserved(new_obj)) {
    // Must be in to_space.
    assert(to()->is_in_reserved(new_obj), "Checking");
    if (forward_ptr == ClaimedForwardPtr) {
      // Wait to get the real forwarding pointer value.
      forward_ptr = real_forwardee(old);
    }
    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
  }

  return forward_ptr;
}

#ifndef PRODUCT
// It's OK to call this multi-threaded;  the worst thing
// that can happen is that we'll get a bunch of closely
// spaced simulated overflows, but that's OK, in fact
// probably good as it would exercise the overflow code
// under contention.
bool ParNewGeneration::should_simulate_overflow() {
  if (_overflow_counter-- <= 0) { // just being defensive
    _overflow_counter = ParGCWorkQueueOverflowInterval;
    return true;
  } else {
    return false;
  }
}
#endif

// In case we are using compressed oops, we need to be careful.
// If the object being pushed is an object array, then its length
// field keeps track of the "grey boundary" at which the next
// incremental scan will be done (see ParGCArrayScanChunk).
// When using compressed oops, this length field is kept in the
// lower 32 bits of the erstwhile klass word and cannot be used
// for the overflow chaining pointer (OCP below). As such the OCP
// would itself need to be compressed into the top 32-bits in this
// case. Unfortunately, see below, in the event that we have a
// promotion failure, the node to be pushed on the list can be
// outside of the Java heap, so the heap-based pointer compression
// would not work (we would have potential aliasing between C-heap
// and Java-heap pointers). For this reason, when using compressed
// oops, we simply use a worker-thread-local, non-shared overflow
// list in the form of a growable array, with a slightly different
// overflow stack draining strategy. If/when we start using fat
// stacks here, we can go back to using (fat) pointer chains
// (although some performance comparisons would be useful since
// single global lists have their own performance disadvantages
// as we were made painfully aware not long ago, see 6786503).
#define BUSY (cast_to_oop<intptr_t>(0x1aff1aff))
void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) {
  assert(is_in_reserved(from_space_obj), "Should be from this generation");
  if (ParGCUseLocalOverflow) {
    // In the case of compressed oops, we use a private, not-shared
    // overflow stack.
    par_scan_state->push_on_overflow_stack(from_space_obj);
  } else {
    assert(!UseCompressedOops, "Error");
    // if the object has been forwarded to itself, then we cannot
    // use the klass pointer for the linked list.  Instead we have
    // to allocate an oopDesc in the C-Heap and use that for the linked list.
    // XXX This is horribly inefficient when a promotion failure occurs
    // and should be fixed. XXX FIX ME !!!
#ifndef PRODUCT
    Atomic::inc_ptr(&_num_par_pushes);
    assert(_num_par_pushes > 0, "Tautology");
#endif
    if (from_space_obj->forwardee() == from_space_obj) {
      oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1, mtGC);
      listhead->forward_to(from_space_obj);
      from_space_obj = listhead;
    }
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list;
    do {
      cur_overflow_list = observed_overflow_list;
      if (cur_overflow_list != BUSY) {
        from_space_obj->set_klass_to_list_ptr(cur_overflow_list);
      } else {
        from_space_obj->set_klass_to_list_ptr(NULL);
      }
      observed_overflow_list =
        (oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list);
    } while (cur_overflow_list != observed_overflow_list);
  }
}

bool ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) {
  bool res;

  if (ParGCUseLocalOverflow) {
    res = par_scan_state->take_from_overflow_stack();
  } else {
    assert(!UseCompressedOops, "Error");
    res = take_from_overflow_list_work(par_scan_state);
  }
  return res;
}


// *NOTE*: The overflow list manipulation code here and
// in CMSCollector:: are very similar in shape,
// except that in the CMS case we thread the objects
// directly into the list via their mark word, and do
// not need to deal with special cases below related
// to chunking of object arrays and promotion failure
// handling.
// CR 6797058 has been filed to attempt consolidation of
// the common code.
// Because of the common code, if you make any changes in
// the code below, please check the CMS version to see if
// similar changes might be needed.
// See CMSCollector::par_take_from_overflow_list() for
// more extensive documentation comments.
bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan_state) {
  ObjToScanQueue* work_q = par_scan_state->work_queue();
  // How many to take?
  size_t objsFromOverflow = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
                                 (size_t)ParGCDesiredObjsFromOverflowList);

  assert(!UseCompressedOops, "Error");
  assert(par_scan_state->overflow_stack() == NULL, "Error");
  if (_overflow_list == NULL) return false;

  // Otherwise, there was something there; try claiming the list.
  oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
  // Trim off a prefix of at most objsFromOverflow items
  Thread* tid = Thread::current();
  size_t spin_count = (size_t)ParallelGCThreads;
  size_t sleep_time_millis = MAX2((size_t)1, objsFromOverflow/100);
  for (size_t spin = 0; prefix == BUSY && spin < spin_count; spin++) {
    // someone grabbed it before we did ...
    // ... we spin for a short while...
    os::sleep(tid, sleep_time_millis, false);
    if (_overflow_list == NULL) {
      // nothing left to take
      return false;
    } else if (_overflow_list != BUSY) {
      // try and grab the prefix
      prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
    }
  }
  if (prefix == NULL || prefix == BUSY) {
    // Nothing to take or waited long enough
    if (prefix == NULL) {
      // Write back the NULL in case we overwrote it with BUSY above
      // and it is still the same value.
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
    return false;
  }
  assert(prefix != NULL && prefix != BUSY, "Error");
  size_t i = 1;
  oop cur = prefix;
  while (i < objsFromOverflow && cur->klass_or_null() != NULL) {
    i++; cur = cur->list_ptr_from_klass();
  }

  // Reattach remaining (suffix) to overflow list
  if (cur->klass_or_null() == NULL) {
    // Write back the NULL in lieu of the BUSY we wrote
    // above and it is still the same value.
    if (_overflow_list == BUSY) {
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
  } else {
    assert(cur->klass_or_null() != (Klass*)(address)BUSY, "Error");
    oop suffix = cur->list_ptr_from_klass();  // suffix will be put back on global list
    cur->set_klass_to_list_ptr(NULL);         // break off suffix
    // It's possible that the list is still in the empty(busy) state
    // we left it in a short while ago; in that case we may be
    // able to place back the suffix.
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list = observed_overflow_list;
    bool attached = false;
    while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
      observed_overflow_list =
        (oop) Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
      if (cur_overflow_list == observed_overflow_list) {
        attached = true;
        break;
      } else cur_overflow_list = observed_overflow_list;
    }
    if (!attached) {
      // Too bad, someone else got in in between; we'll need to do a splice.
      // Find the last item of suffix list
      oop last = suffix;
      while (last->klass_or_null() != NULL) {
        last = last->list_ptr_from_klass();
      }
      // Atomically prepend suffix to current overflow list
      observed_overflow_list = _overflow_list;
      do {
        cur_overflow_list = observed_overflow_list;
        if (cur_overflow_list != BUSY) {
          // Do the splice ...
          last->set_klass_to_list_ptr(cur_overflow_list);
        } else { // cur_overflow_list == BUSY
          last->set_klass_to_list_ptr(NULL);
        }
        observed_overflow_list =
          (oop)Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
      } while (cur_overflow_list != observed_overflow_list);
    }
  }

  // Push objects on prefix list onto this thread's work queue
  assert(prefix != NULL && prefix != BUSY, "program logic");
  cur = prefix;
  ssize_t n = 0;
  while (cur != NULL) {
    oop obj_to_push = cur->forwardee();
    oop next        = cur->list_ptr_from_klass();
    cur->set_klass(obj_to_push->klass());
    // This may be an array object that is self-forwarded. In that case, the list pointer
    // space, cur, is not in the Java heap, but rather in the C-heap and should be freed.
    if (!is_in_reserved(cur)) {
      // This can become a scaling bottleneck when there is work queue overflow coincident
      // with promotion failure.
      oopDesc* f = cur;
      FREE_C_HEAP_ARRAY(oopDesc, f);
    } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = work_q->push(obj_to_push);
    assert(ok, "Should have succeeded");
    cur = next;
    n++;
  }
  TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n));
#ifndef PRODUCT
  assert(_num_par_pushes >= n, "Too many pops?");
  Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
#endif
  return true;
}
#undef BUSY

void ParNewGeneration::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor =
      new ReferenceProcessor(_reserved,                  // span
                             ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
                             (int) ParallelGCThreads,    // mt processing degree
                             refs_discovery_is_mt(),     // mt discovery
                             (int) ParallelGCThreads,    // mt discovery degree
                             refs_discovery_is_atomic(), // atomic_discovery
                             NULL);                      // is_alive_non_header
  }
}

const char* ParNewGeneration::name() const {
  return "par new generation";
}