1 /* 2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp" 27 #include "gc_implementation/parNew/parNewGeneration.hpp" 28 #include "gc_implementation/parNew/parOopClosures.inline.hpp" 29 #include "gc_implementation/shared/adaptiveSizePolicy.hpp" 30 #include "gc_implementation/shared/ageTable.hpp" 31 #include "gc_implementation/shared/copyFailedInfo.hpp" 32 #include "gc_implementation/shared/gcHeapSummary.hpp" 33 #include "gc_implementation/shared/gcTimer.hpp" 34 #include "gc_implementation/shared/gcTrace.hpp" 35 #include "gc_implementation/shared/gcTraceTime.hpp" 36 #include "gc_implementation/shared/parGCAllocBuffer.inline.hpp" 37 #include "gc_implementation/shared/spaceDecorator.hpp" 38 #include "memory/defNewGeneration.inline.hpp" 39 #include "memory/genCollectedHeap.hpp" 40 #include "memory/genOopClosures.inline.hpp" 41 #include "memory/generation.hpp" 42 #include "memory/generation.inline.hpp" 43 #include "memory/referencePolicy.hpp" 44 #include "memory/resourceArea.hpp" 45 #include "memory/sharedHeap.hpp" 46 #include "memory/space.hpp" 47 #include "oops/objArrayOop.hpp" 48 #include "oops/oop.inline.hpp" 49 #include "oops/oop.pcgc.inline.hpp" 50 #include "runtime/atomic.inline.hpp" 51 #include "runtime/handles.hpp" 52 #include "runtime/handles.inline.hpp" 53 #include "runtime/java.hpp" 54 #include "runtime/thread.inline.hpp" 55 #include "utilities/copy.hpp" 56 #include "utilities/globalDefinitions.hpp" 57 #include "utilities/workgroup.hpp" 58 59 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC 60 61 #ifdef _MSC_VER 62 #pragma warning( push ) 63 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list 64 #endif 65 ParScanThreadState::ParScanThreadState(Space* to_space_, 66 ParNewGeneration* young_gen_, 67 Generation* old_gen_, 68 int thread_num_, 69 ObjToScanQueueSet* work_queue_set_, 70 Stack<oop, mtGC>* overflow_stacks_, 71 size_t desired_plab_sz_, 72 ParallelTaskTerminator& term_) 73 : _to_space(to_space_), 74 _old_gen(old_gen_), 75 _young_gen(young_gen_), 76 _thread_num(thread_num_), 77 _work_queue(work_queue_set_->queue(thread_num_)), 78 _to_space_full(false), 79 _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL), 80 _ageTable(false), // false ==> not the global age table, no perf data. 81 _to_space_alloc_buffer(desired_plab_sz_), 82 _to_space_closure(young_gen_, this), 83 _old_gen_closure(young_gen_, this), 84 _to_space_root_closure(young_gen_, this), 85 _old_gen_root_closure(young_gen_, this), 86 _older_gen_closure(young_gen_, this), 87 _evacuate_followers(this, 88 &_to_space_closure, 89 &_old_gen_closure, 90 &_to_space_root_closure, 91 young_gen_, 92 &_old_gen_root_closure, 93 work_queue_set_, 94 &term_), 95 _is_alive_closure(young_gen_), 96 _scan_weak_ref_closure(young_gen_, this), 97 _keep_alive_closure(&_scan_weak_ref_closure), 98 _strong_roots_time(0.0), 99 _term_time(0.0) { 100 #if TASKQUEUE_STATS 101 _term_attempts = 0; 102 _overflow_refills = 0; 103 _overflow_refill_objs = 0; 104 #endif // TASKQUEUE_STATS 105 106 _survivor_chunk_array = (ChunkArray*) old_gen()->get_data_recorder(thread_num()); 107 _hash_seed = 17; // Might want to take time-based random value. 108 _start = os::elapsedTime(); 109 _old_gen_closure.set_generation(old_gen_); 110 _old_gen_root_closure.set_generation(old_gen_); 111 } 112 #ifdef _MSC_VER 113 #pragma warning( pop ) 114 #endif 115 116 void ParScanThreadState::record_survivor_plab(HeapWord* plab_start, 117 size_t plab_word_size) { 118 ChunkArray* sca = survivor_chunk_array(); 119 if (sca != NULL) { 120 // A non-null SCA implies that we want the PLAB data recorded. 121 sca->record_sample(plab_start, plab_word_size); 122 } 123 } 124 125 bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) const { 126 return new_obj->is_objArray() && 127 arrayOop(new_obj)->length() > ParGCArrayScanChunk && 128 new_obj != old_obj; 129 } 130 131 void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) { 132 assert(old->is_objArray(), "must be obj array"); 133 assert(old->is_forwarded(), "must be forwarded"); 134 assert(Universe::heap()->is_in_reserved(old), "must be in heap."); 135 assert(!old_gen()->is_in(old), "must be in young generation."); 136 137 objArrayOop obj = objArrayOop(old->forwardee()); 138 // Process ParGCArrayScanChunk elements now 139 // and push the remainder back onto queue 140 int start = arrayOop(old)->length(); 141 int end = obj->length(); 142 int remainder = end - start; 143 assert(start <= end, "just checking"); 144 if (remainder > 2 * ParGCArrayScanChunk) { 145 // Test above combines last partial chunk with a full chunk 146 end = start + ParGCArrayScanChunk; 147 arrayOop(old)->set_length(end); 148 // Push remainder. 149 bool ok = work_queue()->push(old); 150 assert(ok, "just popped, push must be okay"); 151 } else { 152 // Restore length so that it can be used if there 153 // is a promotion failure and forwarding pointers 154 // must be removed. 155 arrayOop(old)->set_length(end); 156 } 157 158 // process our set of indices (include header in first chunk) 159 // should make sure end is even (aligned to HeapWord in case of compressed oops) 160 if ((HeapWord *)obj < young_old_boundary()) { 161 // object is in to_space 162 obj->oop_iterate_range(&_to_space_closure, start, end); 163 } else { 164 // object is in old generation 165 obj->oop_iterate_range(&_old_gen_closure, start, end); 166 } 167 } 168 169 void ParScanThreadState::trim_queues(int max_size) { 170 ObjToScanQueue* queue = work_queue(); 171 do { 172 while (queue->size() > (juint)max_size) { 173 oop obj_to_scan; 174 if (queue->pop_local(obj_to_scan)) { 175 if ((HeapWord *)obj_to_scan < young_old_boundary()) { 176 if (obj_to_scan->is_objArray() && 177 obj_to_scan->is_forwarded() && 178 obj_to_scan->forwardee() != obj_to_scan) { 179 scan_partial_array_and_push_remainder(obj_to_scan); 180 } else { 181 // object is in to_space 182 obj_to_scan->oop_iterate(&_to_space_closure); 183 } 184 } else { 185 // object is in old generation 186 obj_to_scan->oop_iterate(&_old_gen_closure); 187 } 188 } 189 } 190 // For the case of compressed oops, we have a private, non-shared 191 // overflow stack, so we eagerly drain it so as to more evenly 192 // distribute load early. Note: this may be good to do in 193 // general rather than delay for the final stealing phase. 194 // If applicable, we'll transfer a set of objects over to our 195 // work queue, allowing them to be stolen and draining our 196 // private overflow stack. 197 } while (ParGCTrimOverflow && young_gen()->take_from_overflow_list(this)); 198 } 199 200 bool ParScanThreadState::take_from_overflow_stack() { 201 assert(ParGCUseLocalOverflow, "Else should not call"); 202 assert(young_gen()->overflow_list() == NULL, "Error"); 203 ObjToScanQueue* queue = work_queue(); 204 Stack<oop, mtGC>* const of_stack = overflow_stack(); 205 const size_t num_overflow_elems = of_stack->size(); 206 const size_t space_available = queue->max_elems() - queue->size(); 207 const size_t num_take_elems = MIN3(space_available / 4, 208 ParGCDesiredObjsFromOverflowList, 209 num_overflow_elems); 210 // Transfer the most recent num_take_elems from the overflow 211 // stack to our work queue. 212 for (size_t i = 0; i != num_take_elems; i++) { 213 oop cur = of_stack->pop(); 214 oop obj_to_push = cur->forwardee(); 215 assert(Universe::heap()->is_in_reserved(cur), "Should be in heap"); 216 assert(!old_gen()->is_in_reserved(cur), "Should be in young gen"); 217 assert(Universe::heap()->is_in_reserved(obj_to_push), "Should be in heap"); 218 if (should_be_partially_scanned(obj_to_push, cur)) { 219 assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned"); 220 obj_to_push = cur; 221 } 222 bool ok = queue->push(obj_to_push); 223 assert(ok, "Should have succeeded"); 224 } 225 assert(young_gen()->overflow_list() == NULL, "Error"); 226 return num_take_elems > 0; // was something transferred? 227 } 228 229 void ParScanThreadState::push_on_overflow_stack(oop p) { 230 assert(ParGCUseLocalOverflow, "Else should not call"); 231 overflow_stack()->push(p); 232 assert(young_gen()->overflow_list() == NULL, "Error"); 233 } 234 235 HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) { 236 // If the object is small enough, try to reallocate the buffer. 237 HeapWord* obj = NULL; 238 if (!_to_space_full) { 239 ParGCAllocBuffer* const plab = to_space_alloc_buffer(); 240 Space* const sp = to_space(); 241 if (word_sz * 100 < ParallelGCBufferWastePct * plab->word_sz()) { 242 // Is small enough; abandon this buffer and start a new one. 243 plab->retire(false, false); 244 size_t buf_size = plab->word_sz(); 245 HeapWord* buf_space = sp->par_allocate(buf_size); 246 if (buf_space == NULL) { 247 const size_t min_bytes = 248 ParGCAllocBuffer::min_size() << LogHeapWordSize; 249 size_t free_bytes = sp->free(); 250 while(buf_space == NULL && free_bytes >= min_bytes) { 251 buf_size = free_bytes >> LogHeapWordSize; 252 assert(buf_size == (size_t)align_object_size(buf_size), 253 "Invariant"); 254 buf_space = sp->par_allocate(buf_size); 255 free_bytes = sp->free(); 256 } 257 } 258 if (buf_space != NULL) { 259 plab->set_word_size(buf_size); 260 plab->set_buf(buf_space); 261 record_survivor_plab(buf_space, buf_size); 262 obj = plab->allocate_aligned(word_sz, SurvivorAlignmentInBytes); 263 // Note that we cannot compare buf_size < word_sz below 264 // because of AlignmentReserve (see ParGCAllocBuffer::allocate()). 265 assert(obj != NULL || plab->words_remaining() < word_sz, 266 "Else should have been able to allocate"); 267 // It's conceivable that we may be able to use the 268 // buffer we just grabbed for subsequent small requests 269 // even if not for this one. 270 } else { 271 // We're used up. 272 _to_space_full = true; 273 } 274 275 } else { 276 // Too large; allocate the object individually. 277 obj = sp->par_allocate(word_sz); 278 } 279 } 280 return obj; 281 } 282 283 void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj, size_t word_sz) { 284 // Is the alloc in the current alloc buffer? 285 if (to_space_alloc_buffer()->contains(obj)) { 286 assert(to_space_alloc_buffer()->contains(obj + word_sz - 1), 287 "Should contain whole object."); 288 to_space_alloc_buffer()->undo_allocation(obj, word_sz); 289 } else { 290 CollectedHeap::fill_with_object(obj, word_sz); 291 } 292 } 293 294 void ParScanThreadState::print_promotion_failure_size() { 295 if (_promotion_failed_info.has_failed() && PrintPromotionFailure) { 296 gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ", 297 _thread_num, _promotion_failed_info.first_size()); 298 } 299 } 300 301 class ParScanThreadStateSet: private ResourceArray { 302 public: 303 // Initializes states for the specified number of threads; 304 ParScanThreadStateSet(int num_threads, 305 Space& to_space, 306 ParNewGeneration& gen, 307 Generation& old_gen, 308 ObjToScanQueueSet& queue_set, 309 Stack<oop, mtGC>* overflow_stacks, 310 size_t desired_plab_sz, 311 ParallelTaskTerminator& term); 312 313 ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); } 314 315 inline ParScanThreadState& thread_state(int i); 316 317 void trace_promotion_failed(YoungGCTracer& gc_tracer); 318 void reset(int active_workers, bool promotion_failed); 319 void flush(); 320 321 #if TASKQUEUE_STATS 322 static void 323 print_termination_stats_hdr(outputStream* const st = gclog_or_tty); 324 void print_termination_stats(outputStream* const st = gclog_or_tty); 325 static void 326 print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty); 327 void print_taskqueue_stats(outputStream* const st = gclog_or_tty); 328 void reset_stats(); 329 #endif // TASKQUEUE_STATS 330 331 private: 332 ParallelTaskTerminator& _term; 333 ParNewGeneration& _gen; 334 Generation& _old_gen; 335 public: 336 bool is_valid(int id) const { return id < length(); } 337 ParallelTaskTerminator* terminator() { return &_term; } 338 }; 339 340 ParScanThreadStateSet::ParScanThreadStateSet(int num_threads, 341 Space& to_space, 342 ParNewGeneration& gen, 343 Generation& old_gen, 344 ObjToScanQueueSet& queue_set, 345 Stack<oop, mtGC>* overflow_stacks, 346 size_t desired_plab_sz, 347 ParallelTaskTerminator& term) 348 : ResourceArray(sizeof(ParScanThreadState), num_threads), 349 _gen(gen), 350 _old_gen(old_gen), 351 _term(term) { 352 assert(num_threads > 0, "sanity check!"); 353 assert(ParGCUseLocalOverflow == (overflow_stacks != NULL), 354 "overflow_stack allocation mismatch"); 355 // Initialize states. 356 for (int i = 0; i < num_threads; ++i) { 357 new ((ParScanThreadState*)_data + i) 358 ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set, 359 overflow_stacks, desired_plab_sz, term); 360 } 361 } 362 363 inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i) { 364 assert(i >= 0 && i < length(), "sanity check!"); 365 return ((ParScanThreadState*)_data)[i]; 366 } 367 368 void ParScanThreadStateSet::trace_promotion_failed(YoungGCTracer& gc_tracer) { 369 for (int i = 0; i < length(); ++i) { 370 if (thread_state(i).promotion_failed()) { 371 gc_tracer.report_promotion_failed(thread_state(i).promotion_failed_info()); 372 thread_state(i).promotion_failed_info().reset(); 373 } 374 } 375 } 376 377 void ParScanThreadStateSet::reset(int active_threads, bool promotion_failed) { 378 _term.reset_for_reuse(active_threads); 379 if (promotion_failed) { 380 for (int i = 0; i < length(); ++i) { 381 thread_state(i).print_promotion_failure_size(); 382 } 383 } 384 } 385 386 #if TASKQUEUE_STATS 387 void 388 ParScanThreadState::reset_stats() { 389 taskqueue_stats().reset(); 390 _term_attempts = 0; 391 _overflow_refills = 0; 392 _overflow_refill_objs = 0; 393 } 394 395 void ParScanThreadStateSet::reset_stats() { 396 for (int i = 0; i < length(); ++i) { 397 thread_state(i).reset_stats(); 398 } 399 } 400 401 void ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st) { 402 st->print_raw_cr("GC Termination Stats"); 403 st->print_raw_cr(" elapsed --strong roots-- " 404 "-------termination-------"); 405 st->print_raw_cr("thr ms ms % " 406 " ms % attempts"); 407 st->print_raw_cr("--- --------- --------- ------ " 408 "--------- ------ --------"); 409 } 410 411 void ParScanThreadStateSet::print_termination_stats(outputStream* const st) { 412 print_termination_stats_hdr(st); 413 414 for (int i = 0; i < length(); ++i) { 415 const ParScanThreadState & pss = thread_state(i); 416 const double elapsed_ms = pss.elapsed_time() * 1000.0; 417 const double s_roots_ms = pss.strong_roots_time() * 1000.0; 418 const double term_ms = pss.term_time() * 1000.0; 419 st->print_cr("%3d %9.2f %9.2f %6.2f " 420 "%9.2f %6.2f " SIZE_FORMAT_W(8), 421 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms, 422 term_ms, term_ms * 100 / elapsed_ms, pss.term_attempts()); 423 } 424 } 425 426 // Print stats related to work queue activity. 427 void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st) { 428 st->print_raw_cr("GC Task Stats"); 429 st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr(); 430 st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr(); 431 } 432 433 void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st) { 434 print_taskqueue_stats_hdr(st); 435 436 TaskQueueStats totals; 437 for (int i = 0; i < length(); ++i) { 438 const ParScanThreadState & pss = thread_state(i); 439 const TaskQueueStats & stats = pss.taskqueue_stats(); 440 st->print("%3d ", i); stats.print(st); st->cr(); 441 totals += stats; 442 443 if (pss.overflow_refills() > 0) { 444 st->print_cr(" " SIZE_FORMAT_W(10) " overflow refills " 445 SIZE_FORMAT_W(10) " overflow objects", 446 pss.overflow_refills(), pss.overflow_refill_objs()); 447 } 448 } 449 st->print("tot "); totals.print(st); st->cr(); 450 451 DEBUG_ONLY(totals.verify()); 452 } 453 #endif // TASKQUEUE_STATS 454 455 void ParScanThreadStateSet::flush() { 456 // Work in this loop should be kept as lightweight as 457 // possible since this might otherwise become a bottleneck 458 // to scaling. Should we add heavy-weight work into this 459 // loop, consider parallelizing the loop into the worker threads. 460 for (int i = 0; i < length(); ++i) { 461 ParScanThreadState& par_scan_state = thread_state(i); 462 463 // Flush stats related to To-space PLAB activity and 464 // retire the last buffer. 465 par_scan_state.to_space_alloc_buffer()-> 466 flush_stats_and_retire(_gen.plab_stats(), 467 true /* end_of_gc */, 468 false /* retain */); 469 470 // Every thread has its own age table. We need to merge 471 // them all into one. 472 ageTable *local_table = par_scan_state.age_table(); 473 _gen.age_table()->merge(local_table); 474 475 // Inform old gen that we're done. 476 _old_gen.par_promote_alloc_done(i); 477 _old_gen.par_oop_since_save_marks_iterate_done(i); 478 } 479 480 if (UseConcMarkSweepGC && ParallelGCThreads > 0) { 481 // We need to call this even when ResizeOldPLAB is disabled 482 // so as to avoid breaking some asserts. While we may be able 483 // to avoid this by reorganizing the code a bit, I am loathe 484 // to do that unless we find cases where ergo leads to bad 485 // performance. 486 CFLS_LAB::compute_desired_plab_size(); 487 } 488 } 489 490 ParScanClosure::ParScanClosure(ParNewGeneration* g, 491 ParScanThreadState* par_scan_state) 492 : OopsInKlassOrGenClosure(g), 493 _par_scan_state(par_scan_state), 494 _g(g) { 495 _boundary = _g->reserved().end(); 496 } 497 498 void ParScanWithBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, true, false); } 499 void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); } 500 501 void ParScanWithoutBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, false, false); } 502 void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); } 503 504 void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, true, true); } 505 void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); } 506 507 void ParRootScanWithoutBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, false, true); } 508 void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); } 509 510 ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g, 511 ParScanThreadState* par_scan_state) 512 : ScanWeakRefClosure(g), 513 _par_scan_state(par_scan_state) { 514 } 515 516 void ParScanWeakRefClosure::do_oop(oop* p) { ParScanWeakRefClosure::do_oop_work(p); } 517 void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); } 518 519 #ifdef WIN32 520 #pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */ 521 #endif 522 523 ParEvacuateFollowersClosure::ParEvacuateFollowersClosure(ParScanThreadState* par_scan_state, 524 ParScanWithoutBarrierClosure* to_space_closure, 525 ParScanWithBarrierClosure* old_gen_closure, 526 ParRootScanWithoutBarrierClosure* to_space_root_closure, 527 ParNewGeneration* par_gen, 528 ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure, 529 ObjToScanQueueSet* task_queues, 530 ParallelTaskTerminator* terminator) 531 : _par_scan_state(par_scan_state), 532 _to_space_closure(to_space_closure), 533 _old_gen_closure(old_gen_closure), 534 _to_space_root_closure(to_space_root_closure), 535 _old_gen_root_closure(old_gen_root_closure), 536 _par_gen(par_gen), 537 _task_queues(task_queues), 538 _terminator(terminator) { 539 } 540 541 void ParEvacuateFollowersClosure::do_void() { 542 ObjToScanQueue* work_q = par_scan_state()->work_queue(); 543 544 while (true) { 545 // Scan to-space and old-gen objs until we run out of both. 546 oop obj_to_scan; 547 par_scan_state()->trim_queues(0); 548 549 // We have no local work, attempt to steal from other threads. 550 551 // attempt to steal work from promoted. 552 if (task_queues()->steal(par_scan_state()->thread_num(), 553 par_scan_state()->hash_seed(), 554 obj_to_scan)) { 555 bool res = work_q->push(obj_to_scan); 556 assert(res, "Empty queue should have room for a push."); 557 558 // if successful, goto Start. 559 continue; 560 561 // try global overflow list. 562 } else if (par_gen()->take_from_overflow_list(par_scan_state())) { 563 continue; 564 } 565 566 // Otherwise, offer termination. 567 par_scan_state()->start_term_time(); 568 if (terminator()->offer_termination()) break; 569 par_scan_state()->end_term_time(); 570 } 571 assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0, 572 "Broken overflow list?"); 573 // Finish the last termination pause. 574 par_scan_state()->end_term_time(); 575 } 576 577 ParNewGenTask::ParNewGenTask(ParNewGeneration* young_gen, 578 Generation* old_gen, 579 HeapWord* young_old_boundary, 580 ParScanThreadStateSet* state_set) 581 : AbstractGangTask("ParNewGeneration collection"), 582 _young_gen(young_gen), _old_gen(old_gen), 583 _young_old_boundary(young_old_boundary), 584 _state_set(state_set) { 585 } 586 587 // Reset the terminator for the given number of 588 // active threads. 589 void ParNewGenTask::set_for_termination(int active_workers) { 590 _state_set->reset(active_workers, _young_gen->promotion_failed()); 591 // Should the heap be passed in? There's only 1 for now so 592 // grab it instead. 593 GenCollectedHeap* gch = GenCollectedHeap::heap(); 594 gch->set_n_termination(active_workers); 595 } 596 597 void ParNewGenTask::work(uint worker_id) { 598 GenCollectedHeap* gch = GenCollectedHeap::heap(); 599 // Since this is being done in a separate thread, need new resource 600 // and handle marks. 601 ResourceMark rm; 602 HandleMark hm; 603 604 Generation* old_gen = gch->old_gen(); 605 606 ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id); 607 assert(_state_set->is_valid(worker_id), "Should not have been called"); 608 609 par_scan_state.set_young_old_boundary(_young_old_boundary); 610 611 KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(), 612 gch->rem_set()->klass_rem_set()); 613 CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure, 614 &par_scan_state.to_space_root_closure(), 615 false); 616 617 par_scan_state.start_strong_roots(); 618 gch->gen_process_roots(Generation::Young, 619 true, // Process younger gens, if any, 620 // as strong roots. 621 false, // no scope; this is parallel code 622 SharedHeap::SO_ScavengeCodeCache, 623 GenCollectedHeap::StrongAndWeakRoots, 624 &par_scan_state.to_space_root_closure(), 625 &par_scan_state.older_gen_closure(), 626 &cld_scan_closure); 627 628 par_scan_state.end_strong_roots(); 629 630 // "evacuate followers". 631 par_scan_state.evacuate_followers_closure().do_void(); 632 } 633 634 #ifdef _MSC_VER 635 #pragma warning( push ) 636 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list 637 #endif 638 ParNewGeneration:: 639 ParNewGeneration(ReservedSpace rs, size_t initial_byte_size) 640 : DefNewGeneration(rs, initial_byte_size, "PCopy"), 641 _overflow_list(NULL), 642 _is_alive_closure(this), 643 _plab_stats(YoungPLABSize, PLABWeight) { 644 NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;) 645 NOT_PRODUCT(_num_par_pushes = 0;) 646 _task_queues = new ObjToScanQueueSet(ParallelGCThreads); 647 guarantee(_task_queues != NULL, "task_queues allocation failure."); 648 649 for (uint i1 = 0; i1 < ParallelGCThreads; i1++) { 650 ObjToScanQueue *q = new ObjToScanQueue(); 651 guarantee(q != NULL, "work_queue Allocation failure."); 652 _task_queues->register_queue(i1, q); 653 } 654 655 for (uint i2 = 0; i2 < ParallelGCThreads; i2++) { 656 _task_queues->queue(i2)->initialize(); 657 } 658 659 _overflow_stacks = NULL; 660 if (ParGCUseLocalOverflow) { 661 // typedef to workaround NEW_C_HEAP_ARRAY macro, which can not deal 662 // with ',' 663 typedef Stack<oop, mtGC> GCOopStack; 664 665 _overflow_stacks = NEW_C_HEAP_ARRAY(GCOopStack, ParallelGCThreads, mtGC); 666 for (size_t i = 0; i < ParallelGCThreads; ++i) { 667 new (_overflow_stacks + i) Stack<oop, mtGC>(); 668 } 669 } 670 671 if (UsePerfData) { 672 EXCEPTION_MARK; 673 ResourceMark rm; 674 675 const char* cname = 676 PerfDataManager::counter_name(_gen_counters->name_space(), "threads"); 677 PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None, 678 ParallelGCThreads, CHECK); 679 } 680 } 681 #ifdef _MSC_VER 682 #pragma warning( pop ) 683 #endif 684 685 // ParNewGeneration:: 686 ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) 687 : DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) { 688 } 689 690 template <class T> 691 void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) { 692 #ifdef ASSERT 693 { 694 assert(!oopDesc::is_null(*p), "expected non-null ref"); 695 oop obj = oopDesc::load_decode_heap_oop_not_null(p); 696 // We never expect to see a null reference being processed 697 // as a weak reference. 698 assert(obj->is_oop(), "expected an oop while scanning weak refs"); 699 } 700 #endif // ASSERT 701 702 _par_cl->do_oop_nv(p); 703 704 if (Universe::heap()->is_in_reserved(p)) { 705 oop obj = oopDesc::load_decode_heap_oop_not_null(p); 706 _rs->write_ref_field_gc_par(p, obj); 707 } 708 } 709 710 void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(oop* p) { ParKeepAliveClosure::do_oop_work(p); } 711 void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(narrowOop* p) { ParKeepAliveClosure::do_oop_work(p); } 712 713 // ParNewGeneration:: 714 KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) 715 : DefNewGeneration::KeepAliveClosure(cl) { 716 } 717 718 template <class T> 719 void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) { 720 #ifdef ASSERT 721 { 722 assert(!oopDesc::is_null(*p), "expected non-null ref"); 723 oop obj = oopDesc::load_decode_heap_oop_not_null(p); 724 // We never expect to see a null reference being processed 725 // as a weak reference. 726 assert(obj->is_oop(), "expected an oop while scanning weak refs"); 727 } 728 #endif // ASSERT 729 730 _cl->do_oop_nv(p); 731 732 if (Universe::heap()->is_in_reserved(p)) { 733 oop obj = oopDesc::load_decode_heap_oop_not_null(p); 734 _rs->write_ref_field_gc_par(p, obj); 735 } 736 } 737 738 void /*ParNewGeneration::*/KeepAliveClosure::do_oop(oop* p) { KeepAliveClosure::do_oop_work(p); } 739 void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); } 740 741 template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) { 742 T heap_oop = oopDesc::load_heap_oop(p); 743 if (!oopDesc::is_null(heap_oop)) { 744 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); 745 if ((HeapWord*)obj < _boundary) { 746 assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?"); 747 oop new_obj = obj->is_forwarded() 748 ? obj->forwardee() 749 : _g->DefNewGeneration::copy_to_survivor_space(obj); 750 oopDesc::encode_store_heap_oop_not_null(p, new_obj); 751 } 752 if (_gc_barrier) { 753 // If p points to a younger generation, mark the card. 754 if ((HeapWord*)obj < _gen_boundary) { 755 _rs->write_ref_field_gc_par(p, obj); 756 } 757 } 758 } 759 } 760 761 void ScanClosureWithParBarrier::do_oop(oop* p) { ScanClosureWithParBarrier::do_oop_work(p); } 762 void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); } 763 764 class ParNewRefProcTaskProxy: public AbstractGangTask { 765 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; 766 public: 767 ParNewRefProcTaskProxy(ProcessTask& task, ParNewGeneration& gen, 768 Generation& old_gen, 769 HeapWord* young_old_boundary, 770 ParScanThreadStateSet& state_set); 771 772 private: 773 virtual void work(uint worker_id); 774 virtual void set_for_termination(int active_workers) { 775 _state_set.terminator()->reset_for_reuse(active_workers); 776 } 777 private: 778 ParNewGeneration& _young_gen; 779 ProcessTask& _task; 780 Generation& _old_gen; 781 HeapWord* _young_old_boundary; 782 ParScanThreadStateSet& _state_set; 783 }; 784 785 ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(ProcessTask& task, 786 ParNewGeneration& young_gen, 787 Generation& old_gen, 788 HeapWord* young_old_boundary, 789 ParScanThreadStateSet& state_set) 790 : AbstractGangTask("ParNewGeneration parallel reference processing"), 791 _young_gen(young_gen), 792 _task(task), 793 _old_gen(old_gen), 794 _young_old_boundary(young_old_boundary), 795 _state_set(state_set) { 796 } 797 798 void ParNewRefProcTaskProxy::work(uint worker_id) { 799 ResourceMark rm; 800 HandleMark hm; 801 ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id); 802 par_scan_state.set_young_old_boundary(_young_old_boundary); 803 _task.work(worker_id, par_scan_state.is_alive_closure(), 804 par_scan_state.keep_alive_closure(), 805 par_scan_state.evacuate_followers_closure()); 806 } 807 808 class ParNewRefEnqueueTaskProxy: public AbstractGangTask { 809 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask; 810 EnqueueTask& _task; 811 812 public: 813 ParNewRefEnqueueTaskProxy(EnqueueTask& task) 814 : AbstractGangTask("ParNewGeneration parallel reference enqueue"), 815 _task(task) { 816 } 817 818 virtual void work(uint worker_id) { 819 _task.work(worker_id); 820 } 821 }; 822 823 void ParNewRefProcTaskExecutor::execute(ProcessTask& task) { 824 GenCollectedHeap* gch = GenCollectedHeap::heap(); 825 assert(gch->kind() == CollectedHeap::GenCollectedHeap, 826 "not a generational heap"); 827 FlexibleWorkGang* workers = gch->workers(); 828 assert(workers != NULL, "Need parallel worker threads."); 829 _state_set.reset(workers->active_workers(), _generation.promotion_failed()); 830 ParNewRefProcTaskProxy rp_task(task, _generation, *(gch->old_gen()), 831 _generation.reserved().end(), _state_set); 832 workers->run_task(&rp_task); 833 _state_set.reset(0 /* bad value in debug if not reset */, 834 _generation.promotion_failed()); 835 } 836 837 void ParNewRefProcTaskExecutor::execute(EnqueueTask& task) { 838 GenCollectedHeap* gch = GenCollectedHeap::heap(); 839 FlexibleWorkGang* workers = gch->workers(); 840 assert(workers != NULL, "Need parallel worker threads."); 841 ParNewRefEnqueueTaskProxy enq_task(task); 842 workers->run_task(&enq_task); 843 } 844 845 void ParNewRefProcTaskExecutor::set_single_threaded_mode() { 846 _state_set.flush(); 847 GenCollectedHeap* gch = GenCollectedHeap::heap(); 848 gch->set_par_threads(0); // 0 ==> non-parallel. 849 gch->save_marks(); 850 } 851 852 ScanClosureWithParBarrier::ScanClosureWithParBarrier(ParNewGeneration* g, 853 bool gc_barrier) 854 : ScanClosure(g, gc_barrier) { 855 } 856 857 EvacuateFollowersClosureGeneral:: 858 EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, 859 OopsInGenClosure* cur, 860 OopsInGenClosure* older) 861 : _gch(gch), 862 _scan_cur_or_nonheap(cur), 863 _scan_older(older) { 864 } 865 866 void EvacuateFollowersClosureGeneral::do_void() { 867 do { 868 // Beware: this call will lead to closure applications via virtual 869 // calls. 870 _gch->oop_since_save_marks_iterate(Generation::Young, 871 _scan_cur_or_nonheap, 872 _scan_older); 873 } while (!_gch->no_allocs_since_save_marks(true /* include_young */)); 874 } 875 876 877 // A Generation that does parallel young-gen collection. 878 879 bool ParNewGeneration::_avoid_promotion_undo = false; 880 881 void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, 882 ParScanThreadStateSet& thread_state_set, 883 ParNewTracer& gc_tracer) { 884 assert(_promo_failure_scan_stack.is_empty(), "post condition"); 885 _promo_failure_scan_stack.clear(true); // Clear cached segments. 886 887 remove_forwarding_pointers(); 888 if (PrintGCDetails) { 889 gclog_or_tty->print(" (promotion failed)"); 890 } 891 // All the spaces are in play for mark-sweep. 892 swap_spaces(); // Make life simpler for CMS || rescan; see 6483690. 893 from()->set_next_compaction_space(to()); 894 gch->set_incremental_collection_failed(); 895 // Inform the next generation that a promotion failure occurred. 896 _old_gen->promotion_failure_occurred(); 897 898 // Trace promotion failure in the parallel GC threads 899 thread_state_set.trace_promotion_failed(gc_tracer); 900 // Single threaded code may have reported promotion failure to the global state 901 if (_promotion_failed_info.has_failed()) { 902 gc_tracer.report_promotion_failed(_promotion_failed_info); 903 } 904 // Reset the PromotionFailureALot counters. 905 NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();) 906 } 907 908 void ParNewGeneration::collect(bool full, 909 bool clear_all_soft_refs, 910 size_t size, 911 bool is_tlab) { 912 assert(full || size > 0, "otherwise we don't want to collect"); 913 914 GenCollectedHeap* gch = GenCollectedHeap::heap(); 915 916 _gc_timer->register_gc_start(); 917 918 assert(gch->kind() == CollectedHeap::GenCollectedHeap, 919 "not a CMS generational heap"); 920 AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy(); 921 FlexibleWorkGang* workers = gch->workers(); 922 assert(workers != NULL, "Need workgang for parallel work"); 923 int active_workers = 924 AdaptiveSizePolicy::calc_active_workers(workers->total_workers(), 925 workers->active_workers(), 926 Threads::number_of_non_daemon_threads()); 927 workers->set_active_workers(active_workers); 928 _old_gen = gch->old_gen(); 929 // Do we have to avoid promotion_undo? 930 if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) { 931 set_avoid_promotion_undo(true); 932 } 933 934 // If the next generation is too full to accommodate worst-case promotion 935 // from this generation, pass on collection; let the next generation 936 // do it. 937 if (!collection_attempt_is_safe()) { 938 gch->set_incremental_collection_failed(); // slight lie, in that we did not even attempt one 939 return; 940 } 941 assert(to()->is_empty(), "Else not collection_attempt_is_safe"); 942 943 ParNewTracer gc_tracer; 944 gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start()); 945 gch->trace_heap_before_gc(&gc_tracer); 946 947 init_assuming_no_promotion_failure(); 948 949 if (UseAdaptiveSizePolicy) { 950 set_survivor_overflow(false); 951 size_policy->minor_collection_begin(); 952 } 953 954 GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, gc_tracer.gc_id()); 955 // Capture heap used before collection (for printing). 956 size_t gch_prev_used = gch->used(); 957 958 SpecializationStats::clear(); 959 960 age_table()->clear(); 961 to()->clear(SpaceDecorator::Mangle); 962 963 gch->save_marks(); 964 assert(workers != NULL, "Need parallel worker threads."); 965 int n_workers = active_workers; 966 967 // Set the correct parallelism (number of queues) in the reference processor 968 ref_processor()->set_active_mt_degree(n_workers); 969 970 // Always set the terminator for the active number of workers 971 // because only those workers go through the termination protocol. 972 ParallelTaskTerminator _term(n_workers, task_queues()); 973 ParScanThreadStateSet thread_state_set(workers->active_workers(), 974 *to(), *this, *_old_gen, *task_queues(), 975 _overflow_stacks, desired_plab_sz(), _term); 976 977 ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set); 978 gch->set_par_threads(n_workers); 979 gch->rem_set()->prepare_for_younger_refs_iterate(true); 980 // It turns out that even when we're using 1 thread, doing the work in a 981 // separate thread causes wide variance in run times. We can't help this 982 // in the multi-threaded case, but we special-case n=1 here to get 983 // repeatable measurements of the 1-thread overhead of the parallel code. 984 if (n_workers > 1) { 985 GenCollectedHeap::StrongRootsScope srs(gch); 986 workers->run_task(&tsk); 987 } else { 988 GenCollectedHeap::StrongRootsScope srs(gch); 989 tsk.work(0); 990 } 991 thread_state_set.reset(0 /* Bad value in debug if not reset */, 992 promotion_failed()); 993 994 // Process (weak) reference objects found during scavenge. 995 ReferenceProcessor* rp = ref_processor(); 996 IsAliveClosure is_alive(this); 997 ScanWeakRefClosure scan_weak_ref(this); 998 KeepAliveClosure keep_alive(&scan_weak_ref); 999 ScanClosure scan_without_gc_barrier(this, false); 1000 ScanClosureWithParBarrier scan_with_gc_barrier(this, true); 1001 set_promo_failure_scan_stack_closure(&scan_without_gc_barrier); 1002 EvacuateFollowersClosureGeneral evacuate_followers(gch, 1003 &scan_without_gc_barrier, &scan_with_gc_barrier); 1004 rp->setup_policy(clear_all_soft_refs); 1005 // Can the mt_degree be set later (at run_task() time would be best)? 1006 rp->set_active_mt_degree(active_workers); 1007 ReferenceProcessorStats stats; 1008 if (rp->processing_is_mt()) { 1009 ParNewRefProcTaskExecutor task_executor(*this, thread_state_set); 1010 stats = rp->process_discovered_references(&is_alive, &keep_alive, 1011 &evacuate_followers, &task_executor, 1012 _gc_timer, gc_tracer.gc_id()); 1013 } else { 1014 thread_state_set.flush(); 1015 gch->set_par_threads(0); // 0 ==> non-parallel. 1016 gch->save_marks(); 1017 stats = rp->process_discovered_references(&is_alive, &keep_alive, 1018 &evacuate_followers, NULL, 1019 _gc_timer, gc_tracer.gc_id()); 1020 } 1021 gc_tracer.report_gc_reference_stats(stats); 1022 if (!promotion_failed()) { 1023 // Swap the survivor spaces. 1024 eden()->clear(SpaceDecorator::Mangle); 1025 from()->clear(SpaceDecorator::Mangle); 1026 if (ZapUnusedHeapArea) { 1027 // This is now done here because of the piece-meal mangling which 1028 // can check for valid mangling at intermediate points in the 1029 // collection(s). When a minor collection fails to collect 1030 // sufficient space resizing of the young generation can occur 1031 // an redistribute the spaces in the young generation. Mangle 1032 // here so that unzapped regions don't get distributed to 1033 // other spaces. 1034 to()->mangle_unused_area(); 1035 } 1036 swap_spaces(); 1037 1038 // A successful scavenge should restart the GC time limit count which is 1039 // for full GC's. 1040 size_policy->reset_gc_overhead_limit_count(); 1041 1042 assert(to()->is_empty(), "to space should be empty now"); 1043 1044 adjust_desired_tenuring_threshold(); 1045 } else { 1046 handle_promotion_failed(gch, thread_state_set, gc_tracer); 1047 } 1048 // set new iteration safe limit for the survivor spaces 1049 from()->set_concurrent_iteration_safe_limit(from()->top()); 1050 to()->set_concurrent_iteration_safe_limit(to()->top()); 1051 1052 if (ResizePLAB) { 1053 plab_stats()->adjust_desired_plab_sz(n_workers); 1054 } 1055 1056 if (PrintGC && !PrintGCDetails) { 1057 gch->print_heap_change(gch_prev_used); 1058 } 1059 1060 TASKQUEUE_STATS_ONLY(if (PrintTerminationStats) thread_state_set.print_termination_stats()); 1061 TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) thread_state_set.print_taskqueue_stats()); 1062 1063 if (UseAdaptiveSizePolicy) { 1064 size_policy->minor_collection_end(gch->gc_cause()); 1065 size_policy->avg_survived()->sample(from()->used()); 1066 } 1067 1068 // We need to use a monotonically non-decreasing time in ms 1069 // or we will see time-warp warnings and os::javaTimeMillis() 1070 // does not guarantee monotonicity. 1071 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; 1072 update_time_of_last_gc(now); 1073 1074 SpecializationStats::print(); 1075 1076 rp->set_enqueuing_is_done(true); 1077 if (rp->processing_is_mt()) { 1078 ParNewRefProcTaskExecutor task_executor(*this, thread_state_set); 1079 rp->enqueue_discovered_references(&task_executor); 1080 } else { 1081 rp->enqueue_discovered_references(NULL); 1082 } 1083 rp->verify_no_references_recorded(); 1084 1085 gch->trace_heap_after_gc(&gc_tracer); 1086 gc_tracer.report_tenuring_threshold(tenuring_threshold()); 1087 1088 _gc_timer->register_gc_end(); 1089 1090 gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions()); 1091 } 1092 1093 static int sum; 1094 void ParNewGeneration::waste_some_time() { 1095 for (int i = 0; i < 100; i++) { 1096 sum += i; 1097 } 1098 } 1099 1100 static const oop ClaimedForwardPtr = cast_to_oop<intptr_t>(0x4); 1101 1102 // Because of concurrency, there are times where an object for which 1103 // "is_forwarded()" is true contains an "interim" forwarding pointer 1104 // value. Such a value will soon be overwritten with a real value. 1105 // This method requires "obj" to have a forwarding pointer, and waits, if 1106 // necessary for a real one to be inserted, and returns it. 1107 1108 oop ParNewGeneration::real_forwardee(oop obj) { 1109 oop forward_ptr = obj->forwardee(); 1110 if (forward_ptr != ClaimedForwardPtr) { 1111 return forward_ptr; 1112 } else { 1113 return real_forwardee_slow(obj); 1114 } 1115 } 1116 1117 oop ParNewGeneration::real_forwardee_slow(oop obj) { 1118 // Spin-read if it is claimed but not yet written by another thread. 1119 oop forward_ptr = obj->forwardee(); 1120 while (forward_ptr == ClaimedForwardPtr) { 1121 waste_some_time(); 1122 assert(obj->is_forwarded(), "precondition"); 1123 forward_ptr = obj->forwardee(); 1124 } 1125 return forward_ptr; 1126 } 1127 1128 #ifdef ASSERT 1129 bool ParNewGeneration::is_legal_forward_ptr(oop p) { 1130 return 1131 (_avoid_promotion_undo && p == ClaimedForwardPtr) 1132 || Universe::heap()->is_in_reserved(p); 1133 } 1134 #endif 1135 1136 void ParNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) { 1137 if (m->must_be_preserved_for_promotion_failure(obj)) { 1138 // We should really have separate per-worker stacks, rather 1139 // than use locking of a common pair of stacks. 1140 MutexLocker ml(ParGCRareEvent_lock); 1141 preserve_mark(obj, m); 1142 } 1143 } 1144 1145 // Multiple GC threads may try to promote an object. If the object 1146 // is successfully promoted, a forwarding pointer will be installed in 1147 // the object in the young generation. This method claims the right 1148 // to install the forwarding pointer before it copies the object, 1149 // thus avoiding the need to undo the copy as in 1150 // copy_to_survivor_space_avoiding_with_undo. 1151 1152 oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo( 1153 ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) { 1154 // In the sequential version, this assert also says that the object is 1155 // not forwarded. That might not be the case here. It is the case that 1156 // the caller observed it to be not forwarded at some time in the past. 1157 assert(is_in_reserved(old), "shouldn't be scavenging this oop"); 1158 1159 // The sequential code read "old->age()" below. That doesn't work here, 1160 // since the age is in the mark word, and that might be overwritten with 1161 // a forwarding pointer by a parallel thread. So we must save the mark 1162 // word in a local and then analyze it. 1163 oopDesc dummyOld; 1164 dummyOld.set_mark(m); 1165 assert(!dummyOld.is_forwarded(), 1166 "should not be called with forwarding pointer mark word."); 1167 1168 oop new_obj = NULL; 1169 oop forward_ptr; 1170 1171 // Try allocating obj in to-space (unless too old) 1172 if (dummyOld.age() < tenuring_threshold()) { 1173 new_obj = (oop)par_scan_state->alloc_in_to_space(sz); 1174 if (new_obj == NULL) { 1175 set_survivor_overflow(true); 1176 } 1177 } 1178 1179 if (new_obj == NULL) { 1180 // Either to-space is full or we decided to promote 1181 // try allocating obj tenured 1182 1183 // Attempt to install a null forwarding pointer (atomically), 1184 // to claim the right to install the real forwarding pointer. 1185 forward_ptr = old->forward_to_atomic(ClaimedForwardPtr); 1186 if (forward_ptr != NULL) { 1187 // someone else beat us to it. 1188 return real_forwardee(old); 1189 } 1190 1191 new_obj = _old_gen->par_promote(par_scan_state->thread_num(), old, m, sz); 1192 1193 if (new_obj == NULL) { 1194 // promotion failed, forward to self 1195 _promotion_failed = true; 1196 new_obj = old; 1197 1198 preserve_mark_if_necessary(old, m); 1199 par_scan_state->register_promotion_failure(sz); 1200 } 1201 1202 old->forward_to(new_obj); 1203 forward_ptr = NULL; 1204 } else { 1205 // Is in to-space; do copying ourselves. 1206 Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz); 1207 forward_ptr = old->forward_to_atomic(new_obj); 1208 // Restore the mark word copied above. 1209 new_obj->set_mark(m); 1210 // Increment age if obj still in new generation 1211 new_obj->incr_age(); 1212 par_scan_state->age_table()->add(new_obj, sz); 1213 } 1214 assert(new_obj != NULL, "just checking"); 1215 1216 #ifndef PRODUCT 1217 // This code must come after the CAS test, or it will print incorrect 1218 // information. 1219 if (TraceScavenge) { 1220 gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}", 1221 is_in_reserved(new_obj) ? "copying" : "tenuring", 1222 new_obj->klass()->internal_name(), 1223 (void *)old, 1224 (void *)new_obj, 1225 new_obj->size()); 1226 } 1227 #endif 1228 1229 if (forward_ptr == NULL) { 1230 oop obj_to_push = new_obj; 1231 if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) { 1232 // Length field used as index of next element to be scanned. 1233 // Real length can be obtained from real_forwardee() 1234 arrayOop(old)->set_length(0); 1235 obj_to_push = old; 1236 assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push, 1237 "push forwarded object"); 1238 } 1239 // Push it on one of the queues of to-be-scanned objects. 1240 bool simulate_overflow = false; 1241 NOT_PRODUCT( 1242 if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) { 1243 // simulate a stack overflow 1244 simulate_overflow = true; 1245 } 1246 ) 1247 if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) { 1248 // Add stats for overflow pushes. 1249 if (Verbose && PrintGCDetails) { 1250 gclog_or_tty->print("queue overflow!\n"); 1251 } 1252 push_on_overflow_list(old, par_scan_state); 1253 TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0)); 1254 } 1255 1256 return new_obj; 1257 } 1258 1259 // Oops. Someone beat us to it. Undo the allocation. Where did we 1260 // allocate it? 1261 if (is_in_reserved(new_obj)) { 1262 // Must be in to_space. 1263 assert(to()->is_in_reserved(new_obj), "Checking"); 1264 if (forward_ptr == ClaimedForwardPtr) { 1265 // Wait to get the real forwarding pointer value. 1266 forward_ptr = real_forwardee(old); 1267 } 1268 par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz); 1269 } 1270 1271 return forward_ptr; 1272 } 1273 1274 1275 // Multiple GC threads may try to promote the same object. If two 1276 // or more GC threads copy the object, only one wins the race to install 1277 // the forwarding pointer. The other threads have to undo their copy. 1278 1279 oop ParNewGeneration::copy_to_survivor_space_with_undo( 1280 ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) { 1281 1282 // In the sequential version, this assert also says that the object is 1283 // not forwarded. That might not be the case here. It is the case that 1284 // the caller observed it to be not forwarded at some time in the past. 1285 assert(is_in_reserved(old), "shouldn't be scavenging this oop"); 1286 1287 // The sequential code read "old->age()" below. That doesn't work here, 1288 // since the age is in the mark word, and that might be overwritten with 1289 // a forwarding pointer by a parallel thread. So we must save the mark 1290 // word here, install it in a local oopDesc, and then analyze it. 1291 oopDesc dummyOld; 1292 dummyOld.set_mark(m); 1293 assert(!dummyOld.is_forwarded(), 1294 "should not be called with forwarding pointer mark word."); 1295 1296 bool failed_to_promote = false; 1297 oop new_obj = NULL; 1298 oop forward_ptr; 1299 1300 // Try allocating obj in to-space (unless too old) 1301 if (dummyOld.age() < tenuring_threshold()) { 1302 new_obj = (oop)par_scan_state->alloc_in_to_space(sz); 1303 if (new_obj == NULL) { 1304 set_survivor_overflow(true); 1305 } 1306 } 1307 1308 if (new_obj == NULL) { 1309 // Either to-space is full or we decided to promote 1310 // try allocating obj tenured 1311 new_obj = _old_gen->par_promote(par_scan_state->thread_num(), 1312 old, m, sz); 1313 1314 if (new_obj == NULL) { 1315 // promotion failed, forward to self 1316 forward_ptr = old->forward_to_atomic(old); 1317 new_obj = old; 1318 1319 if (forward_ptr != NULL) { 1320 return forward_ptr; // someone else succeeded 1321 } 1322 1323 _promotion_failed = true; 1324 failed_to_promote = true; 1325 1326 preserve_mark_if_necessary(old, m); 1327 par_scan_state->register_promotion_failure(sz); 1328 } 1329 } else { 1330 // Is in to-space; do copying ourselves. 1331 Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz); 1332 // Restore the mark word copied above. 1333 new_obj->set_mark(m); 1334 // Increment age if new_obj still in new generation 1335 new_obj->incr_age(); 1336 par_scan_state->age_table()->add(new_obj, sz); 1337 } 1338 assert(new_obj != NULL, "just checking"); 1339 1340 #ifndef PRODUCT 1341 // This code must come after the CAS test, or it will print incorrect 1342 // information. 1343 if (TraceScavenge) { 1344 gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}", 1345 is_in_reserved(new_obj) ? "copying" : "tenuring", 1346 new_obj->klass()->internal_name(), 1347 (void*)old, 1348 (void*)new_obj, 1349 new_obj->size()); 1350 } 1351 #endif 1352 1353 // Now attempt to install the forwarding pointer (atomically). 1354 // We have to copy the mark word before overwriting with forwarding 1355 // ptr, so we can restore it below in the copy. 1356 if (!failed_to_promote) { 1357 forward_ptr = old->forward_to_atomic(new_obj); 1358 } 1359 1360 if (forward_ptr == NULL) { 1361 oop obj_to_push = new_obj; 1362 if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) { 1363 // Length field used as index of next element to be scanned. 1364 // Real length can be obtained from real_forwardee() 1365 arrayOop(old)->set_length(0); 1366 obj_to_push = old; 1367 assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push, 1368 "push forwarded object"); 1369 } 1370 // Push it on one of the queues of to-be-scanned objects. 1371 bool simulate_overflow = false; 1372 NOT_PRODUCT( 1373 if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) { 1374 // simulate a stack overflow 1375 simulate_overflow = true; 1376 } 1377 ) 1378 if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) { 1379 // Add stats for overflow pushes. 1380 push_on_overflow_list(old, par_scan_state); 1381 TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0)); 1382 } 1383 1384 return new_obj; 1385 } 1386 1387 // Oops. Someone beat us to it. Undo the allocation. Where did we 1388 // allocate it? 1389 if (is_in_reserved(new_obj)) { 1390 // Must be in to_space. 1391 assert(to()->is_in_reserved(new_obj), "Checking"); 1392 par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz); 1393 } else { 1394 assert(!_avoid_promotion_undo, "Should not be here if avoiding."); 1395 _old_gen->par_promote_alloc_undo(par_scan_state->thread_num(), 1396 (HeapWord*)new_obj, sz); 1397 } 1398 1399 return forward_ptr; 1400 } 1401 1402 #ifndef PRODUCT 1403 // It's OK to call this multi-threaded; the worst thing 1404 // that can happen is that we'll get a bunch of closely 1405 // spaced simulated overflows, but that's OK, in fact 1406 // probably good as it would exercise the overflow code 1407 // under contention. 1408 bool ParNewGeneration::should_simulate_overflow() { 1409 if (_overflow_counter-- <= 0) { // just being defensive 1410 _overflow_counter = ParGCWorkQueueOverflowInterval; 1411 return true; 1412 } else { 1413 return false; 1414 } 1415 } 1416 #endif 1417 1418 // In case we are using compressed oops, we need to be careful. 1419 // If the object being pushed is an object array, then its length 1420 // field keeps track of the "grey boundary" at which the next 1421 // incremental scan will be done (see ParGCArrayScanChunk). 1422 // When using compressed oops, this length field is kept in the 1423 // lower 32 bits of the erstwhile klass word and cannot be used 1424 // for the overflow chaining pointer (OCP below). As such the OCP 1425 // would itself need to be compressed into the top 32-bits in this 1426 // case. Unfortunately, see below, in the event that we have a 1427 // promotion failure, the node to be pushed on the list can be 1428 // outside of the Java heap, so the heap-based pointer compression 1429 // would not work (we would have potential aliasing between C-heap 1430 // and Java-heap pointers). For this reason, when using compressed 1431 // oops, we simply use a worker-thread-local, non-shared overflow 1432 // list in the form of a growable array, with a slightly different 1433 // overflow stack draining strategy. If/when we start using fat 1434 // stacks here, we can go back to using (fat) pointer chains 1435 // (although some performance comparisons would be useful since 1436 // single global lists have their own performance disadvantages 1437 // as we were made painfully aware not long ago, see 6786503). 1438 #define BUSY (cast_to_oop<intptr_t>(0x1aff1aff)) 1439 void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) { 1440 assert(is_in_reserved(from_space_obj), "Should be from this generation"); 1441 if (ParGCUseLocalOverflow) { 1442 // In the case of compressed oops, we use a private, not-shared 1443 // overflow stack. 1444 par_scan_state->push_on_overflow_stack(from_space_obj); 1445 } else { 1446 assert(!UseCompressedOops, "Error"); 1447 // if the object has been forwarded to itself, then we cannot 1448 // use the klass pointer for the linked list. Instead we have 1449 // to allocate an oopDesc in the C-Heap and use that for the linked list. 1450 // XXX This is horribly inefficient when a promotion failure occurs 1451 // and should be fixed. XXX FIX ME !!! 1452 #ifndef PRODUCT 1453 Atomic::inc_ptr(&_num_par_pushes); 1454 assert(_num_par_pushes > 0, "Tautology"); 1455 #endif 1456 if (from_space_obj->forwardee() == from_space_obj) { 1457 oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1, mtGC); 1458 listhead->forward_to(from_space_obj); 1459 from_space_obj = listhead; 1460 } 1461 oop observed_overflow_list = _overflow_list; 1462 oop cur_overflow_list; 1463 do { 1464 cur_overflow_list = observed_overflow_list; 1465 if (cur_overflow_list != BUSY) { 1466 from_space_obj->set_klass_to_list_ptr(cur_overflow_list); 1467 } else { 1468 from_space_obj->set_klass_to_list_ptr(NULL); 1469 } 1470 observed_overflow_list = 1471 (oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list); 1472 } while (cur_overflow_list != observed_overflow_list); 1473 } 1474 } 1475 1476 bool ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) { 1477 bool res; 1478 1479 if (ParGCUseLocalOverflow) { 1480 res = par_scan_state->take_from_overflow_stack(); 1481 } else { 1482 assert(!UseCompressedOops, "Error"); 1483 res = take_from_overflow_list_work(par_scan_state); 1484 } 1485 return res; 1486 } 1487 1488 1489 // *NOTE*: The overflow list manipulation code here and 1490 // in CMSCollector:: are very similar in shape, 1491 // except that in the CMS case we thread the objects 1492 // directly into the list via their mark word, and do 1493 // not need to deal with special cases below related 1494 // to chunking of object arrays and promotion failure 1495 // handling. 1496 // CR 6797058 has been filed to attempt consolidation of 1497 // the common code. 1498 // Because of the common code, if you make any changes in 1499 // the code below, please check the CMS version to see if 1500 // similar changes might be needed. 1501 // See CMSCollector::par_take_from_overflow_list() for 1502 // more extensive documentation comments. 1503 bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan_state) { 1504 ObjToScanQueue* work_q = par_scan_state->work_queue(); 1505 // How many to take? 1506 size_t objsFromOverflow = MIN2((size_t)(work_q->max_elems() - work_q->size())/4, 1507 (size_t)ParGCDesiredObjsFromOverflowList); 1508 1509 assert(!UseCompressedOops, "Error"); 1510 assert(par_scan_state->overflow_stack() == NULL, "Error"); 1511 if (_overflow_list == NULL) { 1512 return false; 1513 } 1514 1515 // Otherwise, there was something there; try claiming the list. 1516 oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list)); 1517 // Trim off a prefix of at most objsFromOverflow items 1518 Thread* tid = Thread::current(); 1519 size_t spin_count = (size_t)ParallelGCThreads; 1520 size_t sleep_time_millis = MAX2((size_t)1, objsFromOverflow/100); 1521 for (size_t spin = 0; prefix == BUSY && spin < spin_count; spin++) { 1522 // someone grabbed it before we did ... 1523 // ... we spin for a short while... 1524 os::sleep(tid, sleep_time_millis, false); 1525 if (_overflow_list == NULL) { 1526 // nothing left to take 1527 return false; 1528 } else if (_overflow_list != BUSY) { 1529 // try and grab the prefix 1530 prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list)); 1531 } 1532 } 1533 if (prefix == NULL || prefix == BUSY) { 1534 // Nothing to take or waited long enough 1535 if (prefix == NULL) { 1536 // Write back the NULL in case we overwrote it with BUSY above 1537 // and it is still the same value. 1538 (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY); 1539 } 1540 return false; 1541 } 1542 assert(prefix != NULL && prefix != BUSY, "Error"); 1543 size_t i = 1; 1544 oop cur = prefix; 1545 while (i < objsFromOverflow && cur->klass_or_null() != NULL) { 1546 i++; cur = cur->list_ptr_from_klass(); 1547 } 1548 1549 // Reattach remaining (suffix) to overflow list 1550 if (cur->klass_or_null() == NULL) { 1551 // Write back the NULL in lieu of the BUSY we wrote 1552 // above and it is still the same value. 1553 if (_overflow_list == BUSY) { 1554 (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY); 1555 } 1556 } else { 1557 assert(cur->klass_or_null() != (Klass*)(address)BUSY, "Error"); 1558 oop suffix = cur->list_ptr_from_klass(); // suffix will be put back on global list 1559 cur->set_klass_to_list_ptr(NULL); // break off suffix 1560 // It's possible that the list is still in the empty(busy) state 1561 // we left it in a short while ago; in that case we may be 1562 // able to place back the suffix. 1563 oop observed_overflow_list = _overflow_list; 1564 oop cur_overflow_list = observed_overflow_list; 1565 bool attached = false; 1566 while (observed_overflow_list == BUSY || observed_overflow_list == NULL) { 1567 observed_overflow_list = 1568 (oop) Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list); 1569 if (cur_overflow_list == observed_overflow_list) { 1570 attached = true; 1571 break; 1572 } else cur_overflow_list = observed_overflow_list; 1573 } 1574 if (!attached) { 1575 // Too bad, someone else got in in between; we'll need to do a splice. 1576 // Find the last item of suffix list 1577 oop last = suffix; 1578 while (last->klass_or_null() != NULL) { 1579 last = last->list_ptr_from_klass(); 1580 } 1581 // Atomically prepend suffix to current overflow list 1582 observed_overflow_list = _overflow_list; 1583 do { 1584 cur_overflow_list = observed_overflow_list; 1585 if (cur_overflow_list != BUSY) { 1586 // Do the splice ... 1587 last->set_klass_to_list_ptr(cur_overflow_list); 1588 } else { // cur_overflow_list == BUSY 1589 last->set_klass_to_list_ptr(NULL); 1590 } 1591 observed_overflow_list = 1592 (oop)Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list); 1593 } while (cur_overflow_list != observed_overflow_list); 1594 } 1595 } 1596 1597 // Push objects on prefix list onto this thread's work queue 1598 assert(prefix != NULL && prefix != BUSY, "program logic"); 1599 cur = prefix; 1600 ssize_t n = 0; 1601 while (cur != NULL) { 1602 oop obj_to_push = cur->forwardee(); 1603 oop next = cur->list_ptr_from_klass(); 1604 cur->set_klass(obj_to_push->klass()); 1605 // This may be an array object that is self-forwarded. In that case, the list pointer 1606 // space, cur, is not in the Java heap, but rather in the C-heap and should be freed. 1607 if (!is_in_reserved(cur)) { 1608 // This can become a scaling bottleneck when there is work queue overflow coincident 1609 // with promotion failure. 1610 oopDesc* f = cur; 1611 FREE_C_HEAP_ARRAY(oopDesc, f, mtGC); 1612 } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) { 1613 assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned"); 1614 obj_to_push = cur; 1615 } 1616 bool ok = work_q->push(obj_to_push); 1617 assert(ok, "Should have succeeded"); 1618 cur = next; 1619 n++; 1620 } 1621 TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n)); 1622 #ifndef PRODUCT 1623 assert(_num_par_pushes >= n, "Too many pops?"); 1624 Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes); 1625 #endif 1626 return true; 1627 } 1628 #undef BUSY 1629 1630 void ParNewGeneration::ref_processor_init() { 1631 if (_ref_processor == NULL) { 1632 // Allocate and initialize a reference processor 1633 _ref_processor = 1634 new ReferenceProcessor(_reserved, // span 1635 ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing 1636 (int) ParallelGCThreads, // mt processing degree 1637 refs_discovery_is_mt(), // mt discovery 1638 (int) ParallelGCThreads, // mt discovery degree 1639 refs_discovery_is_atomic(), // atomic_discovery 1640 NULL); // is_alive_non_header 1641 } 1642 } 1643 1644 const char* ParNewGeneration::name() const { 1645 return "par new generation"; 1646 }