/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
#include "gc_implementation/parNew/parNewGeneration.hpp"
#include "gc_implementation/parNew/parOopClosures.inline.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "gc_implementation/shared/ageTable.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/promotionFailedInfo.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/defNewGeneration.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.hpp"
#include "memory/generation.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/thread.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/workgroup.hpp"

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParScanThreadState::ParScanThreadState(Space* to_space_,
                                       ParNewGeneration* gen_,
                                       Generation* old_gen_,
                                       int thread_num_,
                                       ObjToScanQueueSet* work_queue_set_,
                                       Stack<oop, mtGC>* overflow_stacks_,
                                       size_t desired_plab_sz_,
                                       ParallelTaskTerminator& term_) :
  _to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_),
  _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
  _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
  _ageTable(false), // false ==> not the global age table, no perf data.
  _to_space_alloc_buffer(desired_plab_sz_),
  _to_space_closure(gen_, this), _old_gen_closure(gen_, this),
  _to_space_root_closure(gen_, this), _old_gen_root_closure(gen_, this),
  _older_gen_closure(gen_, this),
  _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
                      &_to_space_root_closure, gen_, &_old_gen_root_closure,
                      work_queue_set_, &term_),
  _is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this),
  _keep_alive_closure(&_scan_weak_ref_closure),
  _strong_roots_time(0.0), _term_time(0.0)
{
#if TASKQUEUE_STATS
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
#endif // TASKQUEUE_STATS

  _survivor_chunk_array =
    (ChunkArray*) old_gen()->get_data_recorder(thread_num());
  _hash_seed = 17;  // Might want to take time-based random value.
  _start = os::elapsedTime();
  _old_gen_closure.set_generation(old_gen_);
  _old_gen_root_closure.set_generation(old_gen_);
}
#ifdef _MSC_VER
#pragma warning( pop )
#endif

void ParScanThreadState::record_survivor_plab(HeapWord* plab_start,
                                              size_t plab_word_size) {
  ChunkArray* sca = survivor_chunk_array();
  if (sca != NULL) {
    // A non-null SCA implies that we want the PLAB data recorded.
    sca->record_sample(plab_start, plab_word_size);
  }
}

bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) const {
  return new_obj->is_objArray() &&
         arrayOop(new_obj)->length() > ParGCArrayScanChunk &&
         new_obj != old_obj;
}

void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
  assert(old->is_objArray(), "must be obj array");
  assert(old->is_forwarded(), "must be forwarded");
  assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
  assert(!old_gen()->is_in(old), "must be in young generation.");

  objArrayOop obj = objArrayOop(old->forwardee());
  // Process ParGCArrayScanChunk elements now
  // and push the remainder back onto queue
  int start = arrayOop(old)->length();
  int end = obj->length();
  int remainder = end - start;
  assert(start <= end, "just checking");
  if (remainder > 2 * ParGCArrayScanChunk) {
    // Test above combines last partial chunk with a full chunk
    end = start + ParGCArrayScanChunk;
    arrayOop(old)->set_length(end);
    // Push remainder.
    bool ok = work_queue()->push(old);
    assert(ok, "just popped, push must be okay");
  } else {
    // Restore length so that it can be used if there
    // is a promotion failure and forwarding pointers
    // must be removed.
    arrayOop(old)->set_length(end);
  }

  // process our set of indices (include header in first chunk)
  // should make sure end is even (aligned to HeapWord in case of compressed oops)
  if ((HeapWord *)obj < young_old_boundary()) {
    // object is in to_space
    obj->oop_iterate_range(&_to_space_closure, start, end);
  } else {
    // object is in old generation
    obj->oop_iterate_range(&_old_gen_closure, start, end);
  }
}
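
// Worked example of the chunking protocol above (illustrative; assumes the
// default ParGCArrayScanChunk of 50). For a 130-element object array:
//   pass 1: start = 0 (the old copy's length field acts as the scan cursor),
//           end = 130, remainder = 130 > 2 * 50, so scan elements [0, 50),
//           set old's length to 50 and re-push old onto the work queue;
//   pass 2: start = 50, remainder = 80 <= 2 * 50, so restore the true length
//           (130) and scan the final [50, 130) in one step, thereby combining
//           the last partial chunk with a full one.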

void ParScanThreadState::trim_queues(int max_size) {
  ObjToScanQueue* queue = work_queue();
  do {
    while (queue->size() > (juint)max_size) {
      oop obj_to_scan;
      if (queue->pop_local(obj_to_scan)) {
        if ((HeapWord *)obj_to_scan < young_old_boundary()) {
          if (obj_to_scan->is_objArray() &&
              obj_to_scan->is_forwarded() &&
              obj_to_scan->forwardee() != obj_to_scan) {
            scan_partial_array_and_push_remainder(obj_to_scan);
          } else {
            // object is in to_space
            obj_to_scan->oop_iterate(&_to_space_closure);
          }
        } else {
          // object is in old generation
          obj_to_scan->oop_iterate(&_old_gen_closure);
        }
      }
    }
    // For the case of compressed oops, we have a private, non-shared
    // overflow stack, so we eagerly drain it so as to more evenly
    // distribute load early. Note: this may be good to do in
    // general rather than delay for the final stealing phase.
    // If applicable, we'll transfer a set of objects over to our
    // work queue, allowing them to be stolen and draining our
    // private overflow stack.
  } while (ParGCTrimOverflow && young_gen()->take_from_overflow_list(this));
}
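
// Note on the max_size parameter: callers that want the queue fully drained
// pass trim_queues(0), as the evacuate-followers loop in this file does; a
// positive max_size instead stops draining once the queue has shrunk to that
// size, leaving the remainder available for other threads to steal.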

bool ParScanThreadState::take_from_overflow_stack() {
  assert(ParGCUseLocalOverflow, "Else should not call");
  assert(young_gen()->overflow_list() == NULL, "Error");
  ObjToScanQueue* queue = work_queue();
  Stack<oop, mtGC>* const of_stack = overflow_stack();
  const size_t num_overflow_elems = of_stack->size();
  const size_t space_available = queue->max_elems() - queue->size();
  const size_t num_take_elems = MIN3(space_available / 4,
                                     ParGCDesiredObjsFromOverflowList,
                                     num_overflow_elems);
  // Transfer the most recent num_take_elems from the overflow
  // stack to our work queue.
  for (size_t i = 0; i != num_take_elems; i++) {
    oop cur = of_stack->pop();
    oop obj_to_push = cur->forwardee();
    assert(Universe::heap()->is_in_reserved(cur), "Should be in heap");
    assert(!old_gen()->is_in_reserved(cur), "Should be in young gen");
    assert(Universe::heap()->is_in_reserved(obj_to_push), "Should be in heap");
    if (should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = queue->push(obj_to_push);
    assert(ok, "Should have succeeded");
  }
  assert(young_gen()->overflow_list() == NULL, "Error");
  return num_take_elems > 0;  // was something transferred?
}

void ParScanThreadState::push_on_overflow_stack(oop p) {
  assert(ParGCUseLocalOverflow, "Else should not call");
  overflow_stack()->push(p);
  assert(young_gen()->overflow_list() == NULL, "Error");
}
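
// The slow path below retires the current PLAB only for requests that are
// "small enough", i.e. when word_sz is less than ParallelGCBufferWastePct
// percent of the PLAB size. As an illustration (assuming the default
// ParallelGCBufferWastePct of 10): with a 4096-word PLAB, a request of up to
// ~409 words abandons the buffer and starts a new one, while anything larger
// is allocated directly from to-space, so that no more than roughly 10% of a
// buffer is ever thrown away for a single oversized request.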
HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {

  // Otherwise, if the object is small enough, try to reallocate the
  // buffer.
  HeapWord* obj = NULL;
  if (!_to_space_full) {
    ParGCAllocBuffer* const plab = to_space_alloc_buffer();
    Space*            const sp   = to_space();
    if (word_sz * 100 <
        ParallelGCBufferWastePct * plab->word_sz()) {
      // Is small enough; abandon this buffer and start a new one.
      plab->retire(false, false);
      size_t buf_size = plab->word_sz();
      HeapWord* buf_space = sp->par_allocate(buf_size);
      if (buf_space == NULL) {
        const size_t min_bytes =
          ParGCAllocBuffer::min_size() << LogHeapWordSize;
        size_t free_bytes = sp->free();
        while(buf_space == NULL && free_bytes >= min_bytes) {
          buf_size = free_bytes >> LogHeapWordSize;
          assert(buf_size == (size_t)align_object_size(buf_size),
                 "Invariant");
          buf_space  = sp->par_allocate(buf_size);
          free_bytes = sp->free();
        }
      }
      if (buf_space != NULL) {
        plab->set_word_size(buf_size);
        plab->set_buf(buf_space);
        record_survivor_plab(buf_space, buf_size);
        obj = plab->allocate(word_sz);
        // Note that we cannot compare buf_size < word_sz below
        // because of AlignmentReserve (see ParGCAllocBuffer::allocate()).
        assert(obj != NULL || plab->words_remaining() < word_sz,
               "Else should have been able to allocate");
        // It's conceivable that we may be able to use the
        // buffer we just grabbed for subsequent small requests
        // even if not for this one.
      } else {
        // We're used up.
        _to_space_full = true;
      }

    } else {
      // Too large; allocate the object individually.
      obj = sp->par_allocate(word_sz);
    }
  }
  return obj;
}


void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj,
                                                size_t word_sz) {
  // Is the alloc in the current alloc buffer?
  if (to_space_alloc_buffer()->contains(obj)) {
    assert(to_space_alloc_buffer()->contains(obj + word_sz - 1),
           "Should contain whole object.");
    to_space_alloc_buffer()->undo_allocation(obj, word_sz);
  } else {
    CollectedHeap::fill_with_object(obj, word_sz);
  }
}

void ParScanThreadState::print_promotion_failure_size() {
  if (_promotion_failed_info.promotion_failed() && PrintPromotionFailure) {
    gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ",
                        _thread_num, _promotion_failed_info.first_size());
  }
}

class ParScanThreadStateSet: private ResourceArray {
public:
  // Initializes states for the specified number of threads;
  ParScanThreadStateSet(int num_threads,
                        Space& to_space,
                        ParNewGeneration& gen,
                        Generation& old_gen,
                        ObjToScanQueueSet& queue_set,
                        Stack<oop, mtGC>* overflow_stacks_,
                        size_t desired_plab_sz,
                        ParallelTaskTerminator& term);

  ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }

  inline ParScanThreadState& thread_state(int i);

  void trace_promotion_failed(YoungGCTracer& gc_tracer);
  void reset(int active_workers, bool promotion_failed);
  void flush();

#if TASKQUEUE_STATS
  static void
    print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_termination_stats(outputStream* const st = gclog_or_tty);
  static void
    print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_taskqueue_stats(outputStream* const st = gclog_or_tty);
  void reset_stats();
#endif // TASKQUEUE_STATS

private:
  ParallelTaskTerminator& _term;
  ParNewGeneration&       _gen;
  Generation&             _next_gen;
public:
  bool is_valid(int id) const { return id < length(); }
  ParallelTaskTerminator* terminator() { return &_term; }
};


ParScanThreadStateSet::ParScanThreadStateSet(
  int num_threads, Space& to_space, ParNewGeneration& gen,
  Generation& old_gen, ObjToScanQueueSet& queue_set,
  Stack<oop, mtGC>* overflow_stacks,
  size_t desired_plab_sz, ParallelTaskTerminator& term)
  : ResourceArray(sizeof(ParScanThreadState), num_threads),
    _gen(gen), _next_gen(old_gen), _term(term)
{
  assert(num_threads > 0, "sanity check!");
  assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
         "overflow_stack allocation mismatch");
  // Initialize states.
  for (int i = 0; i < num_threads; ++i) {
    new ((ParScanThreadState*)_data + i)
        ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
                           overflow_stacks, desired_plab_sz, term);
  }
}

inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
{
  assert(i >= 0 && i < length(), "sanity check!");
  return ((ParScanThreadState*)_data)[i];
}

void ParScanThreadStateSet::trace_promotion_failed(YoungGCTracer& gc_tracer) {
  for (int i = 0; i < length(); ++i) {
    if (thread_state(i).promotion_failed()) {
      gc_tracer.report_promotion_failed(thread_state(i).promotion_failed_info());
      thread_state(i).promotion_failed_info().reset();
    }
  }
}

void ParScanThreadStateSet::reset(int active_threads, bool promotion_failed)
{
  _term.reset_for_reuse(active_threads);
  if (promotion_failed) {
    for (int i = 0; i < length(); ++i) {
      thread_state(i).print_promotion_failure_size();
    }
  }
}

#if TASKQUEUE_STATS
void
ParScanThreadState::reset_stats()
{
  taskqueue_stats().reset();
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
}

void ParScanThreadStateSet::reset_stats()
{
  for (int i = 0; i < length(); ++i) {
    thread_state(i).reset_stats();
  }
}

void
ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st)
{
  st->print_raw_cr("GC Termination Stats");
  st->print_raw_cr("     elapsed  --strong roots-- "
                   "-------termination-------");
  st->print_raw_cr("thr     ms        ms       %   "
                   "    ms       %   attempts");
  st->print_raw_cr("--- --------- --------- ------ "
                   "--------- ------ --------");
}

void ParScanThreadStateSet::print_termination_stats(outputStream* const st)
{
  print_termination_stats_hdr(st);

  for (int i = 0; i < length(); ++i) {
    const ParScanThreadState & pss = thread_state(i);
    const double elapsed_ms = pss.elapsed_time() * 1000.0;
    const double s_roots_ms = pss.strong_roots_time() * 1000.0;
    const double term_ms = pss.term_time() * 1000.0;
    st->print_cr("%3d %9.2f %9.2f %6.2f "
                 "%9.2f %6.2f " SIZE_FORMAT_W(8),
                 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
                 term_ms, term_ms * 100 / elapsed_ms, pss.term_attempts());
  }
}

// Print stats related to work queue activity.
void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st)
{
  st->print_raw_cr("GC Task Stats");
  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
}

void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st)
{
  print_taskqueue_stats_hdr(st);

  TaskQueueStats totals;
  for (int i = 0; i < length(); ++i) {
    const ParScanThreadState & pss = thread_state(i);
    const TaskQueueStats & stats = pss.taskqueue_stats();
    st->print("%3d ", i); stats.print(st); st->cr();
    totals += stats;

    if (pss.overflow_refills() > 0) {
      st->print_cr("    " SIZE_FORMAT_W(10) " overflow refills    "
                   SIZE_FORMAT_W(10) " overflow objects",
                   pss.overflow_refills(), pss.overflow_refill_objs());
    }
  }
  st->print("tot "); totals.print(st); st->cr();

  DEBUG_ONLY(totals.verify());
}
#endif // TASKQUEUE_STATS

void ParScanThreadStateSet::flush()
{
  // Work in this loop should be kept as lightweight as
  // possible since this might otherwise become a bottleneck
  // to scaling. Should we add heavy-weight work into this
  // loop, consider parallelizing the loop into the worker threads.
  for (int i = 0; i < length(); ++i) {
    ParScanThreadState& par_scan_state = thread_state(i);

    // Flush stats related to To-space PLAB activity and
    // retire the last buffer.
    par_scan_state.to_space_alloc_buffer()->
      flush_stats_and_retire(_gen.plab_stats(),
                             true /* end_of_gc */,
                             false /* retain */);

    // Every thread has its own age table. We need to merge
    // them all into one.
    ageTable *local_table = par_scan_state.age_table();
    _gen.age_table()->merge(local_table);

    // Inform old gen that we're done.
    _next_gen.par_promote_alloc_done(i);
    _next_gen.par_oop_since_save_marks_iterate_done(i);
  }

  if (UseConcMarkSweepGC && ParallelGCThreads > 0) {
    // We need to call this even when ResizeOldPLAB is disabled
    // so as to avoid breaking some asserts. While we may be able
    // to avoid this by reorganizing the code a bit, I am loath
    // to do that unless we find cases where ergo leads to bad
    // performance.
    CFLS_LAB::compute_desired_plab_size();
  }
}

ParScanClosure::ParScanClosure(ParNewGeneration* g,
                               ParScanThreadState* par_scan_state) :
  OopsInGenClosure(g), _par_scan_state(par_scan_state), _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ParScanWithBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, false); }
void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }

void ParScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, false); }
void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }

void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, true); }
void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }

void ParRootScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, true); }
void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }

ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
                                             ParScanThreadState* par_scan_state)
  : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
{}

void ParScanWeakRefClosure::do_oop(oop* p)       { ParScanWeakRefClosure::do_oop_work(p); }
void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }

#ifdef WIN32
#pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */
#endif

ParEvacuateFollowersClosure::ParEvacuateFollowersClosure(
    ParScanThreadState* par_scan_state_,
    ParScanWithoutBarrierClosure* to_space_closure_,
    ParScanWithBarrierClosure* old_gen_closure_,
    ParRootScanWithoutBarrierClosure* to_space_root_closure_,
    ParNewGeneration* par_gen_,
    ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_,
    ObjToScanQueueSet* task_queues_,
    ParallelTaskTerminator* terminator_) :

    _par_scan_state(par_scan_state_),
    _to_space_closure(to_space_closure_),
    _old_gen_closure(old_gen_closure_),
    _to_space_root_closure(to_space_root_closure_),
    _old_gen_root_closure(old_gen_root_closure_),
    _par_gen(par_gen_),
    _task_queues(task_queues_),
    _terminator(terminator_)
{}
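
// A sketch of the scan/steal loop below: each iteration first drains the
// worker's own queue completely (trim_queues(0)), then tries to refill it,
// first by stealing from another thread's queue and then from the global
// overflow list. Only when both come up empty does the thread offer
// termination, and it resumes the loop if the terminator turns it down.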
void ParEvacuateFollowersClosure::do_void() {
  ObjToScanQueue* work_q = par_scan_state()->work_queue();

  while (true) {

    // Scan to-space and old-gen objs until we run out of both.
    oop obj_to_scan;
    par_scan_state()->trim_queues(0);

    // We have no local work, attempt to steal from other threads.

    // attempt to steal work from promoted.
    if (task_queues()->steal(par_scan_state()->thread_num(),
                             par_scan_state()->hash_seed(),
                             obj_to_scan)) {
      bool res = work_q->push(obj_to_scan);
      assert(res, "Empty queue should have room for a push.");

      // if successful, goto Start.
      continue;

      // try global overflow list.
    } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
      continue;
    }

    // Otherwise, offer termination.
    par_scan_state()->start_term_time();
    if (terminator()->offer_termination()) break;
    par_scan_state()->end_term_time();
  }
  assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
         "Broken overflow list?");
  // Finish the last termination pause.
  par_scan_state()->end_term_time();
}

ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* next_gen,
                             HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) :
    AbstractGangTask("ParNewGeneration collection"),
    _gen(gen), _next_gen(next_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
{}

// Reset the terminator for the given number of
// active threads.
void ParNewGenTask::set_for_termination(int active_workers) {
  _state_set->reset(active_workers, _gen->promotion_failed());
  // Should the heap be passed in? There's only 1 for now so
  // grab it instead.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->set_n_termination(active_workers);
}

// The "i" passed to this method is the part of the work for
// this thread. It is not the worker ID. The "i" is derived
// from _started_workers, which is incremented in internal_note_start()
// called in GangWorker loop(); that in turn runs under the protection
// of the gang monitor and is called after a task is started. So "i"
// is based on first-come-first-served.

void ParNewGenTask::work(uint worker_id) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  // Since this is being done in a separate thread, need new resource
  // and handle marks.
  ResourceMark rm;
  HandleMark hm;
  // We would need multiple old-gen queues otherwise.
  assert(gch->n_gens() == 2, "Par young collection currently only works with one older gen.");

  Generation* old_gen = gch->next_gen(_gen);

  ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
  assert(_state_set->is_valid(worker_id), "Should not have been called");

  par_scan_state.set_young_old_boundary(_young_old_boundary);

  par_scan_state.start_strong_roots();
  gch->gen_process_strong_roots(_gen->level(),
                                true,  // Process younger gens, if any,
                                       // as strong roots.
                                false, // no scope; this is parallel code
                                false, // not collecting perm generation.
                                SharedHeap::SO_AllClasses,
                                &par_scan_state.to_space_root_closure(),
                                true,  // walk *all* scavengable nmethods
                                &par_scan_state.older_gen_closure());
  par_scan_state.end_strong_roots();

  // "evacuate followers".
  par_scan_state.evacuate_followers_closure().do_void();
}

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParNewGeneration::
ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
  : DefNewGeneration(rs, initial_byte_size, level, "PCopy"),
  _overflow_list(NULL),
  _is_alive_closure(this),
  _plab_stats(YoungPLABSize, PLABWeight)
{
  NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
  NOT_PRODUCT(_num_par_pushes = 0;)
  _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
  guarantee(_task_queues != NULL, "task_queues allocation failure.");

  for (uint i1 = 0; i1 < ParallelGCThreads; i1++) {
    ObjToScanQueue *q = new ObjToScanQueue();
    guarantee(q != NULL, "work_queue Allocation failure.");
    _task_queues->register_queue(i1, q);
  }

  for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
    _task_queues->queue(i2)->initialize();

  _overflow_stacks = NULL;
  if (ParGCUseLocalOverflow) {

    // typedef to workaround NEW_C_HEAP_ARRAY macro, which can not deal
    // with ','
    typedef Stack<oop, mtGC> GCOopStack;

    _overflow_stacks = NEW_C_HEAP_ARRAY(GCOopStack, ParallelGCThreads, mtGC);
    for (size_t i = 0; i < ParallelGCThreads; ++i) {
      new (_overflow_stacks + i) Stack<oop, mtGC>();
    }
  }

  if (UsePerfData) {
    EXCEPTION_MARK;
    ResourceMark rm;

    const char* cname =
      PerfDataManager::counter_name(_gen_counters->name_space(), "threads");
    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None,
                                     ParallelGCThreads, CHECK);
  }
}
#ifdef _MSC_VER
#pragma warning( pop )
#endif

// ParNewGeneration::
ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}

template <class T>
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _par_cl->do_oop_nv(p);

  if (Universe::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(oop* p)       { ParKeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(narrowOop* p) { ParKeepAliveClosure::do_oop_work(p); }
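
// Note, as an aside, on the is_in_reserved(p) guard in the keep-alive
// closures above and below: during reference processing the slot p being
// kept alive is not always a field of a heap object (it may, for instance,
// be a reference-list link held outside the heap), so the parallel
// card-table barrier is applied only when p lies within the reserved heap,
// where a card for it actually exists.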

// ParNewGeneration::
KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {}

template <class T>
void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _cl->do_oop_nv(p);

  if (Universe::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/KeepAliveClosure::do_oop(oop* p)       { KeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); }

template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if ((HeapWord*)obj < _boundary) {
      assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
      oop new_obj = obj->is_forwarded()
                      ? obj->forwardee()
                      : _g->DefNewGeneration::copy_to_survivor_space(obj);
      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
    }
    if (_gc_barrier) {
      // If p points to a younger generation, mark the card.
      if ((HeapWord*)obj < _gen_boundary) {
        _rs->write_ref_field_gc_par(p, obj);
      }
    }
  }
}

void ScanClosureWithParBarrier::do_oop(oop* p)       { ScanClosureWithParBarrier::do_oop_work(p); }
void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }

class ParNewRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
public:
  ParNewRefProcTaskProxy(ProcessTask& task, ParNewGeneration& gen,
                         Generation& next_gen,
                         HeapWord* young_old_boundary,
                         ParScanThreadStateSet& state_set);

private:
  virtual void work(uint worker_id);
  virtual void set_for_termination(int active_workers) {
    _state_set.terminator()->reset_for_reuse(active_workers);
  }
private:
  ParNewGeneration&      _gen;
  ProcessTask&           _task;
  Generation&            _next_gen;
  HeapWord*              _young_old_boundary;
  ParScanThreadStateSet& _state_set;
};

ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(
    ProcessTask& task, ParNewGeneration& gen,
    Generation& next_gen,
    HeapWord* young_old_boundary,
    ParScanThreadStateSet& state_set)
  : AbstractGangTask("ParNewGeneration parallel reference processing"),
    _gen(gen),
    _task(task),
    _next_gen(next_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
{
}

void ParNewRefProcTaskProxy::work(uint worker_id)
{
  ResourceMark rm;
  HandleMark hm;
  ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id);
  par_scan_state.set_young_old_boundary(_young_old_boundary);
  _task.work(worker_id, par_scan_state.is_alive_closure(),
             par_scan_state.keep_alive_closure(),
             par_scan_state.evacuate_followers_closure());
}

class ParNewRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _task;

public:
  ParNewRefEnqueueTaskProxy(EnqueueTask& task)
    : AbstractGangTask("ParNewGeneration parallel reference enqueue"),
      _task(task)
  { }

  virtual void work(uint worker_id)
  {
    _task.work(worker_id);
  }
};


void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "not a generational heap");
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  _state_set.reset(workers->active_workers(), _generation.promotion_failed());
  ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(),
                                 _generation.reserved().end(), _state_set);
  workers->run_task(&rp_task);
  _state_set.reset(0 /* bad value in debug if not reset */,
                   _generation.promotion_failed());
}

void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  ParNewRefEnqueueTaskProxy enq_task(task);
  workers->run_task(&enq_task);
}

void ParNewRefProcTaskExecutor::set_single_threaded_mode()
{
  _state_set.flush();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->set_par_threads(0);  // 0 ==> non-parallel.
  gch->save_marks();
}

ScanClosureWithParBarrier::
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
  ScanClosure(g, gc_barrier) {}

EvacuateFollowersClosureGeneral::
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
                                OopsInGenClosure* cur,
                                OopsInGenClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void EvacuateFollowersClosureGeneral::do_void() {
  do {
    // Beware: this call will lead to closure applications via virtual
    // calls.
    _gch->oop_since_save_marks_iterate(_level,
                                       _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}


// A Generation that does parallel young-gen collection.

bool ParNewGeneration::_avoid_promotion_undo = false;

void ParNewGeneration::adjust_desired_tenuring_threshold() {
  // Set the desired survivor size to half the real survivor space
  _tenuring_threshold =
    age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
}
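
// Note: the "half" above reflects the default TargetSurvivorRatio of 50;
// compute_tenuring_threshold() derives the desired survivor occupancy as
// TargetSurvivorRatio percent of the capacity passed in (see
// ageTable::compute_tenuring_threshold), so a non-default setting changes
// the fraction accordingly.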

void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set, ParNewTracer& gc_tracer) {
  assert(_promo_failure_scan_stack.is_empty(), "post condition");
  _promo_failure_scan_stack.clear(true); // Clear cached segments.

  remove_forwarding_pointers();
  if (PrintGCDetails) {
    gclog_or_tty->print(" (promotion failed)");
  }
  // All the spaces are in play for mark-sweep.
  swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
  from()->set_next_compaction_space(to());
  gch->set_incremental_collection_failed();
  // Inform the next generation that a promotion failure occurred.
  _next_gen->promotion_failure_occurred();

  // Trace promotion failure in the parallel GC threads
  thread_state_set.trace_promotion_failed(gc_tracer);
  // Single threaded code may have reported promotion failure to the global state
  if (_promotion_failed_info.promotion_failed()) {
    gc_tracer.report_promotion_failed(_promotion_failed_info);
  }
  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
}
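
// Roughly, collect() below proceeds in these phases: size the worker gang
// and bail out early if the old generation cannot absorb worst-case
// promotion; scan strong roots and evacuate reachable objects via
// ParNewGenTask (run by the work gang, or inline when only one worker is
// active); process discovered references; then either swap the survivor
// spaces on success or unwind via handle_promotion_failed(); and finally
// update tenuring/PLAB ergonomics and GC tracing state.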
void ParNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start(os::elapsed_counter());

  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "not a CMS generational heap");
  AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need workgang for parallel work");
  int active_workers =
    AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
                                            workers->active_workers(),
                                            Threads::number_of_non_daemon_threads());
  workers->set_active_workers(active_workers);
  _next_gen = gch->next_gen(this);
  assert(_next_gen != NULL,
         "This must be the youngest gen, and not the only gen");
  assert(gch->n_gens() == 2,
         "Par collection currently only works with single older gen.");
  // Do we have to avoid promotion_undo?
  if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) {
    set_avoid_promotion_undo(true);
  }

  // If the next generation is too full to accommodate worst-case promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  ParNewTracer gc_tracer;
  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
  gch->trace_heap_before_gc(&gc_tracer);

  init_assuming_no_promotion_failure();

  if (UseAdaptiveSizePolicy) {
    set_survivor_overflow(false);
    size_policy->minor_collection_begin();
  }

  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  SpecializationStats::clear();

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->save_marks();
  assert(workers != NULL, "Need parallel worker threads.");
  int n_workers = active_workers;

  // Set the correct parallelism (number of queues) in the reference processor
  ref_processor()->set_active_mt_degree(n_workers);

  // Always set the terminator for the active number of workers
  // because only those workers go through the termination protocol.
  ParallelTaskTerminator _term(n_workers, task_queues());
  ParScanThreadStateSet thread_state_set(workers->active_workers(),
                                         *to(), *this, *_next_gen, *task_queues(),
                                         _overflow_stacks, desired_plab_sz(), _term);

  ParNewGenTask tsk(this, _next_gen, reserved().end(), &thread_state_set);
  gch->set_par_threads(n_workers);
  gch->rem_set()->prepare_for_younger_refs_iterate(true);
  // It turns out that even when we're using 1 thread, doing the work in a
  // separate thread causes wide variance in run times. We can't help this
  // in the multi-threaded case, but we special-case n=1 here to get
  // repeatable measurements of the 1-thread overhead of the parallel code.
  if (n_workers > 1) {
    GenCollectedHeap::StrongRootsScope srs(gch);
    workers->run_task(&tsk);
  } else {
    GenCollectedHeap::StrongRootsScope srs(gch);
    tsk.work(0);
  }
  thread_state_set.reset(0 /* Bad value in debug if not reset */,
                         promotion_failed());

  // Process (weak) reference objects found during scavenge.
  ReferenceProcessor* rp = ref_processor();
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);
  KeepAliveClosure keep_alive(&scan_weak_ref);
  ScanClosure scan_without_gc_barrier(this, false);
  ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
  set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
  EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
    &scan_without_gc_barrier, &scan_with_gc_barrier);
  rp->setup_policy(clear_all_soft_refs);
  // Can the mt_degree be set later (at run_task() time would be best)?
  rp->set_active_mt_degree(active_workers);
  ReferenceProcessorStats stats;
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, &task_executor,
                                              _gc_timer);
  } else {
    thread_state_set.flush();
    gch->set_par_threads(0);  // 0 ==> non-parallel.
    gch->save_marks();
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, NULL,
                                              _gc_timer);
  }
  gc_tracer.report_gc_reference_stats(stats);
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s). When a minor collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation. Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    size_policy->reset_gc_overhead_limit_count();

    assert(to()->is_empty(), "to space should be empty now");
  } else {
    handle_promotion_failed(gch, thread_state_set, gc_tracer);
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());

  adjust_desired_tenuring_threshold();
  if (ResizePLAB) {
    plab_stats()->adjust_desired_plab_sz(n_workers);
  }

  if (PrintGC && !PrintGCDetails) {
    gch->print_heap_change(gch_prev_used);
  }

  if (PrintGCDetails && ParallelGCVerbose) {
    TASKQUEUE_STATS_ONLY(thread_state_set.print_termination_stats());
    TASKQUEUE_STATS_ONLY(thread_state_set.print_taskqueue_stats());
  }

  if (UseAdaptiveSizePolicy) {
    size_policy->minor_collection_end(gch->gc_cause());
    size_policy->avg_survived()->sample(from()->used());
  }

  // We need to use a monotonically non-decreasing time in ms,
  // or we will see time-warp warnings; os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  SpecializationStats::print();

  rp->set_enqueuing_is_done(true);
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    rp->enqueue_discovered_references(&task_executor);
  } else {
    rp->enqueue_discovered_references(NULL);
  }
  rp->verify_no_references_recorded();

  gch->trace_heap_after_gc(&gc_tracer);

  _gc_timer->register_gc_end(os::elapsed_counter());

  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}

static int sum;
void ParNewGeneration::waste_some_time() {
  for (int i = 0; i < 100; i++) {
    sum += i;
  }
}

static const oop ClaimedForwardPtr = oop(0x4);

// Because of concurrency, there are times where an object for which
// "is_forwarded()" is true contains an "interim" forwarding pointer
// value. Such a value will soon be overwritten with a real value.
// This method requires "obj" to have a forwarding pointer, and waits,
// if necessary, for a real one to be inserted, and returns it.

oop ParNewGeneration::real_forwardee(oop obj) {
  oop forward_ptr = obj->forwardee();
  if (forward_ptr != ClaimedForwardPtr) {
    return forward_ptr;
  } else {
    return real_forwardee_slow(obj);
  }
}

oop ParNewGeneration::real_forwardee_slow(oop obj) {
  // Spin-read if it is claimed but not yet written by another thread.
  oop forward_ptr = obj->forwardee();
  while (forward_ptr == ClaimedForwardPtr) {
    waste_some_time();
    assert(obj->is_forwarded(), "precondition");
    forward_ptr = obj->forwardee();
  }
  return forward_ptr;
}
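
// A sketch of the claim protocol (used on the avoid-promotion-undo path,
// i.e. when running under CMS): a thread promoting into the old generation
// first CASes ClaimedForwardPtr into the old copy's mark word to claim the
// right to install the real forwarding pointer, then performs the promotion
// and installs the real forwardee. (Copies into to-space are instead CASed
// in directly, after the copy.) Any other thread that reads
// ClaimedForwardPtr in the interim spins in real_forwardee_slow() above
// until the real value appears.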

#ifdef ASSERT
bool ParNewGeneration::is_legal_forward_ptr(oop p) {
  return
    (_avoid_promotion_undo && p == ClaimedForwardPtr)
    || Universe::heap()->is_in_reserved(p);
}
#endif

void ParNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    // We should really have separate per-worker stacks, rather
    // than use locking of a common pair of stacks.
    MutexLocker ml(ParGCRareEvent_lock);
    preserve_mark(obj, m);
  }
}

// Multiple GC threads may try to promote an object. If the object
// is successfully promoted, a forwarding pointer will be installed in
// the object in the young generation. This method claims the right
// to install the forwarding pointer before it copies the object,
// thus avoiding the need to undo the copy as in
// copy_to_survivor_space_with_undo.

oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo(
        ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
  // In the sequential version, this assert also says that the object is
  // not forwarded. That might not be the case here. It is the case that
  // the caller observed it to be not forwarded at some time in the past.
  assert(is_in_reserved(old), "shouldn't be scavenging this oop");

  // The sequential code read "old->age()" below. That doesn't work here,
  // since the age is in the mark word, and that might be overwritten with
  // a forwarding pointer by a parallel thread. So we must save the mark
  // word in a local and then analyze it.
  oopDesc dummyOld;
  dummyOld.set_mark(m);
  assert(!dummyOld.is_forwarded(),
         "should not be called with forwarding pointer mark word.");

  oop new_obj = NULL;
  oop forward_ptr;

  // Try allocating obj in to-space (unless too old)
  if (dummyOld.age() < tenuring_threshold()) {
    new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
    if (new_obj == NULL) {
      set_survivor_overflow(true);
    }
  }

  if (new_obj == NULL) {
    // Either to-space is full or we decided to promote
    // try allocating obj tenured

    // Attempt to install a null forwarding pointer (atomically),
    // to claim the right to install the real forwarding pointer.
    forward_ptr = old->forward_to_atomic(ClaimedForwardPtr);
    if (forward_ptr != NULL) {
      // someone else beat us to it.
      return real_forwardee(old);
    }

    new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
                                     old, m, sz);

    if (new_obj == NULL) {
      // promotion failed, forward to self
      _promotion_failed = true;
      new_obj = old;

      preserve_mark_if_necessary(old, m);
      par_scan_state->register_promotion_failure(sz);
    }

    old->forward_to(new_obj);
    forward_ptr = NULL;
  } else {
    // Is in to-space; do copying ourselves.
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
    forward_ptr = old->forward_to_atomic(new_obj);
    // Restore the mark word copied above.
    new_obj->set_mark(m);
    // Increment age if obj still in new generation
    new_obj->incr_age();
    par_scan_state->age_table()->add(new_obj, sz);
  }
  assert(new_obj != NULL, "just checking");

  if (forward_ptr == NULL) {
    oop obj_to_push = new_obj;
    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
      // Length field used as index of next element to be scanned.
      // Real length can be obtained from real_forwardee()
      arrayOop(old)->set_length(0);
      obj_to_push = old;
      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
             "push forwarded object");
    }
    // Push it on one of the queues of to-be-scanned objects.
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
      // Add stats for overflow pushes.
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("queue overflow!\n");
      }
      push_on_overflow_list(old, par_scan_state);
      TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
    }

    return new_obj;
  }

  // Oops. Someone beat us to it. Undo the allocation. Where did we
  // allocate it?
  if (is_in_reserved(new_obj)) {
    // Must be in to_space.
    assert(to()->is_in_reserved(new_obj), "Checking");
    if (forward_ptr == ClaimedForwardPtr) {
      // Wait to get the real forwarding pointer value.
      forward_ptr = real_forwardee(old);
    }
    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
  }

  return forward_ptr;
}

// Multiple GC threads may try to promote the same object. If two
// or more GC threads copy the object, only one wins the race to install
// the forwarding pointer. The other threads have to undo their copy.

oop ParNewGeneration::copy_to_survivor_space_with_undo(
        ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {

  // In the sequential version, this assert also says that the object is
  // not forwarded. That might not be the case here. It is the case that
  // the caller observed it to be not forwarded at some time in the past.
  assert(is_in_reserved(old), "shouldn't be scavenging this oop");

  // The sequential code read "old->age()" below. That doesn't work here,
  // since the age is in the mark word, and that might be overwritten with
  // a forwarding pointer by a parallel thread. So we must save the mark
  // word here, install it in a local oopDesc, and then analyze it.
  oopDesc dummyOld;
  dummyOld.set_mark(m);
  assert(!dummyOld.is_forwarded(),
         "should not be called with forwarding pointer mark word.");

  bool failed_to_promote = false;
  oop new_obj = NULL;
  oop forward_ptr;

  // Try allocating obj in to-space (unless too old)
  if (dummyOld.age() < tenuring_threshold()) {
    new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
    if (new_obj == NULL) {
      set_survivor_overflow(true);
    }
  }

  if (new_obj == NULL) {
    // Either to-space is full or we decided to promote
    // try allocating obj tenured
    new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
                                     old, m, sz);

    if (new_obj == NULL) {
      // promotion failed, forward to self
      forward_ptr = old->forward_to_atomic(old);
      new_obj = old;

      if (forward_ptr != NULL) {
        return forward_ptr;  // someone else succeeded
      }

      _promotion_failed = true;
      failed_to_promote = true;

      preserve_mark_if_necessary(old, m);
      par_scan_state->register_promotion_failure(sz);
    }
  } else {
    // Is in to-space; do copying ourselves.
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
    // Restore the mark word copied above.
    new_obj->set_mark(m);
    // Increment age if new_obj still in new generation
    new_obj->incr_age();
    par_scan_state->age_table()->add(new_obj, sz);
  }
  assert(new_obj != NULL, "just checking");

  // Now attempt to install the forwarding pointer (atomically).
  // We have to copy the mark word before overwriting with forwarding
  // ptr, so we can restore it below in the copy.
  if (!failed_to_promote) {
    forward_ptr = old->forward_to_atomic(new_obj);
  }

  if (forward_ptr == NULL) {
    oop obj_to_push = new_obj;
    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
      // Length field used as index of next element to be scanned.
      // Real length can be obtained from real_forwardee()
      arrayOop(old)->set_length(0);
      obj_to_push = old;
      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
             "push forwarded object");
    }
    // Push it on one of the queues of to-be-scanned objects.
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
      // Add stats for overflow pushes.
      push_on_overflow_list(old, par_scan_state);
      TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
    }

    return new_obj;
  }

  // Oops. Someone beat us to it. Undo the allocation. Where did we
  // allocate it?
  if (is_in_reserved(new_obj)) {
    // Must be in to_space.
    assert(to()->is_in_reserved(new_obj), "Checking");
    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
  } else {
    assert(!_avoid_promotion_undo, "Should not be here if avoiding.");
    _next_gen->par_promote_alloc_undo(par_scan_state->thread_num(),
                                      (HeapWord*)new_obj, sz);
  }

  return forward_ptr;
}

#ifndef PRODUCT
// It's OK to call this multi-threaded; the worst thing
// that can happen is that we'll get a bunch of closely
// spaced simulated overflows, but that's OK, in fact
// probably good as it would exercise the overflow code
// under contention.
bool ParNewGeneration::should_simulate_overflow() {
  if (_overflow_counter-- <= 0) { // just being defensive
    _overflow_counter = ParGCWorkQueueOverflowInterval;
    return true;
  } else {
    return false;
  }
}
#endif

// In case we are using compressed oops, we need to be careful.
// If the object being pushed is an object array, then its length
// field keeps track of the "grey boundary" at which the next
// incremental scan will be done (see ParGCArrayScanChunk).
// When using compressed oops, this length field is kept in the
// lower 32 bits of the erstwhile klass word and cannot be used
// for the overflow chaining pointer (OCP below). As such the OCP
// would itself need to be compressed into the top 32-bits in this
// case. Unfortunately, see below, in the event that we have a
// promotion failure, the node to be pushed on the list can be
// outside of the Java heap, so the heap-based pointer compression
// would not work (we would have potential aliasing between C-heap
// and Java-heap pointers). For this reason, when using compressed
// oops, we simply use a worker-thread-local, non-shared overflow
// list in the form of a growable array, with a slightly different
// overflow stack draining strategy. If/when we start using fat
// stacks here, we can go back to using (fat) pointer chains
// (although some performance comparisons would be useful since
// single global lists have their own performance disadvantages
// as we were made painfully aware not long ago, see 6786503).
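
// For the global (non-compressed-oops) path below, the list is threaded
// through the objects themselves; schematically:
//
//   _overflow_list -> objA -> objB -> NULL
//
// where each "->" is the klass word of the node, overwritten via
// set_klass_to_list_ptr(), and each node's forwardee() points at the copy
// that actually needs scanning. Self-forwarded (promotion-failed) objects
// cannot have their klass word overwritten, so they get a C-heap proxy node
// instead, as explained in push_on_overflow_list() below.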
#define BUSY (oop(0x1aff1aff))
void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) {
  assert(is_in_reserved(from_space_obj), "Should be from this generation");
  if (ParGCUseLocalOverflow) {
    // In the case of compressed oops, we use a private, not-shared
    // overflow stack.
    par_scan_state->push_on_overflow_stack(from_space_obj);
  } else {
    assert(!UseCompressedOops, "Error");
    // if the object has been forwarded to itself, then we cannot
    // use the klass pointer for the linked list. Instead we have
    // to allocate an oopDesc in the C-Heap and use that for the linked list.
    // XXX This is horribly inefficient when a promotion failure occurs
    // and should be fixed. XXX FIX ME !!!
#ifndef PRODUCT
    Atomic::inc_ptr(&_num_par_pushes);
    assert(_num_par_pushes > 0, "Tautology");
#endif
    if (from_space_obj->forwardee() == from_space_obj) {
      oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1, mtGC);
      listhead->forward_to(from_space_obj);
      from_space_obj = listhead;
    }
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list;
    do {
      cur_overflow_list = observed_overflow_list;
      if (cur_overflow_list != BUSY) {
        from_space_obj->set_klass_to_list_ptr(cur_overflow_list);
      } else {
        from_space_obj->set_klass_to_list_ptr(NULL);
      }
      observed_overflow_list =
        (oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list);
    } while (cur_overflow_list != observed_overflow_list);
  }
}
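
// Note: ParGCUseLocalOverflow selects between the two overflow schemes at
// startup; as the block comment above push_on_overflow_list() explains, the
// thread-local stacks are what make the compressed-oops case workable, which
// is why the shared-list branch below asserts !UseCompressedOops.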
bool ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) {
  bool res;

  if (ParGCUseLocalOverflow) {
    res = par_scan_state->take_from_overflow_stack();
  } else {
    assert(!UseCompressedOops, "Error");
    res = take_from_overflow_list_work(par_scan_state);
  }
  return res;
}


// *NOTE*: The overflow list manipulation code here and
// in CMSCollector:: are very similar in shape,
// except that in the CMS case we thread the objects
// directly into the list via their mark word, and do
// not need to deal with special cases below related
// to chunking of object arrays and promotion failure
// handling.
// CR 6797058 has been filed to attempt consolidation of
// the common code.
// Because of the common code, if you make any changes in
// the code below, please check the CMS version to see if
// similar changes might be needed.
// See CMSCollector::par_take_from_overflow_list() for
// more extensive documentation comments.
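
// In outline, take_from_overflow_list_work() proceeds as follows:
//   1. atomically swap BUSY into _overflow_list to claim the whole list,
//      spinning briefly (with short sleeps) if another thread holds it;
//   2. walk up to objsFromOverflow nodes to split off a prefix;
//   3. hand the suffix back, either by CASing it into a still-BUSY/NULL
//      _overflow_list or, failing that, by splicing it in front of whatever
//      list has appeared in the meantime;
//   4. push the prefix objects onto this thread's work queue, freeing any
//      C-heap proxy nodes created for promotion-failed objects.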
bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan_state) {
  ObjToScanQueue* work_q = par_scan_state->work_queue();
  // How many to take?
  size_t objsFromOverflow = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
                                 (size_t)ParGCDesiredObjsFromOverflowList);

  assert(!UseCompressedOops, "Error");
  assert(par_scan_state->overflow_stack() == NULL, "Error");
  if (_overflow_list == NULL) return false;

  // Otherwise, there was something there; try claiming the list.
  oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
  // Trim off a prefix of at most objsFromOverflow items
  Thread* tid = Thread::current();
  size_t spin_count = (size_t)ParallelGCThreads;
  size_t sleep_time_millis = MAX2((size_t)1, objsFromOverflow/100);
  for (size_t spin = 0; prefix == BUSY && spin < spin_count; spin++) {
    // someone grabbed it before we did ...
    // ... we spin for a short while...
    os::sleep(tid, sleep_time_millis, false);
    if (_overflow_list == NULL) {
      // nothing left to take
      return false;
    } else if (_overflow_list != BUSY) {
      // try and grab the prefix
      prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
    }
  }
  if (prefix == NULL || prefix == BUSY) {
    // Nothing to take or waited long enough
    if (prefix == NULL) {
      // Write back the NULL in case we overwrote it with BUSY above
      // and it is still the same value.
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
    return false;
  }
  assert(prefix != NULL && prefix != BUSY, "Error");
  size_t i = 1;
  oop cur = prefix;
  while (i < objsFromOverflow && cur->klass_or_null() != NULL) {
    i++; cur = oop(cur->klass());
  }

  // Reattach remaining (suffix) to overflow list
  if (cur->klass_or_null() == NULL) {
    // Write back the NULL in lieu of the BUSY we wrote
    // above, if it is still the same value.
    if (_overflow_list == BUSY) {
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
  } else {
    assert(cur->klass_or_null() != BUSY, "Error");
    oop suffix = oop(cur->klass());   // suffix will be put back on global list
    cur->set_klass_to_list_ptr(NULL); // break off suffix
    // It's possible that the list is still in the empty(busy) state
    // we left it in a short while ago; in that case we may be
    // able to place back the suffix.
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list = observed_overflow_list;
    bool attached = false;
    while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
      observed_overflow_list =
        (oop) Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
      if (cur_overflow_list == observed_overflow_list) {
        attached = true;
        break;
      } else cur_overflow_list = observed_overflow_list;
    }
    if (!attached) {
      // Too bad, someone else got in in between; we'll need to do a splice.
      // Find the last item of suffix list
      oop last = suffix;
      while (last->klass_or_null() != NULL) {
        last = oop(last->klass());
      }
      // Atomically prepend suffix to current overflow list
      observed_overflow_list = _overflow_list;
      do {
        cur_overflow_list = observed_overflow_list;
        if (cur_overflow_list != BUSY) {
          // Do the splice ...
          last->set_klass_to_list_ptr(cur_overflow_list);
        } else { // cur_overflow_list == BUSY
          last->set_klass_to_list_ptr(NULL);
        }
        observed_overflow_list =
          (oop)Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
      } while (cur_overflow_list != observed_overflow_list);
    }
  }

  // Push objects on prefix list onto this thread's work queue
  assert(prefix != NULL && prefix != BUSY, "program logic");
  cur = prefix;
  ssize_t n = 0;
  while (cur != NULL) {
    oop obj_to_push = cur->forwardee();
    oop next = oop(cur->klass_or_null());
    cur->set_klass(obj_to_push->klass());
    // This may be an array object that is self-forwarded. In that case, the list pointer
    // space, cur, is not in the Java heap, but rather in the C-heap and should be freed.
    if (!is_in_reserved(cur)) {
      // This can become a scaling bottleneck when there is work queue overflow coincident
      // with promotion failure.
      oopDesc* f = cur;
      FREE_C_HEAP_ARRAY(oopDesc, f, mtGC);
    } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = work_q->push(obj_to_push);
    assert(ok, "Should have succeeded");
    cur = next;
    n++;
  }
  TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n));
#ifndef PRODUCT
  assert(_num_par_pushes >= n, "Too many pops?");
  Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
#endif
  return true;
}
#undef BUSY

void ParNewGeneration::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor =
      new ReferenceProcessor(_reserved,                  // span
                             ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
                             (int) ParallelGCThreads,    // mt processing degree
                             refs_discovery_is_mt(),     // mt discovery
                             (int) ParallelGCThreads,    // mt discovery degree
                             refs_discovery_is_atomic(), // atomic_discovery
                             NULL,                       // is_alive_non_header
                             false);                     // write barrier for next field updates
  }
}

const char* ParNewGeneration::name() const {
  return "par new generation";
}

bool ParNewGeneration::in_use() {
  return UseParNewGC && ParallelGCThreads > 0;
}