/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
#include "gc_implementation/parNew/parNewGeneration.hpp"
#include "gc_implementation/parNew/parOopClosures.inline.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "gc_implementation/shared/ageTable.hpp"
#include "gc_implementation/shared/copyFailedInfo.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.inline.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/defNewGeneration.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.hpp"
#include "memory/generation.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/workgroup.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParScanThreadState::ParScanThreadState(Space* to_space_,
                                       ParNewGeneration* gen_,
                                       Generation* old_gen_,
                                       int thread_num_,
                                       ObjToScanQueueSet* work_queue_set_,
                                       Stack<oop, mtGC>* overflow_stacks_,
                                       size_t desired_plab_sz_,
                                       ParallelTaskTerminator& term_) :
  _to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_),
  _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
  _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
  _ageTable(false), // false ==> not the global age table, no perf data.
  _to_space_alloc_buffer(desired_plab_sz_),
  _to_space_closure(gen_, this), _old_gen_closure(gen_, this),
  _to_space_root_closure(gen_, this), _old_gen_root_closure(gen_, this),
  _older_gen_closure(gen_, this),
  _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
                      &_to_space_root_closure, gen_, &_old_gen_root_closure,
                      work_queue_set_, &term_),
  _is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this),
  _keep_alive_closure(&_scan_weak_ref_closure),
  _strong_roots_time(0.0), _term_time(0.0)
{
#if TASKQUEUE_STATS
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
#endif // TASKQUEUE_STATS

  _survivor_chunk_array =
    (ChunkArray*) old_gen()->get_data_recorder(thread_num());
  _hash_seed = 17;  // Might want to take time-based random value.
  _start = os::elapsedTime();
  _old_gen_closure.set_generation(old_gen_);
  _old_gen_root_closure.set_generation(old_gen_);
}
#ifdef _MSC_VER
#pragma warning( pop )
#endif

void ParScanThreadState::record_survivor_plab(HeapWord* plab_start,
                                              size_t plab_word_size) {
  ChunkArray* sca = survivor_chunk_array();
  if (sca != NULL) {
    // A non-null SCA implies that we want the PLAB data recorded.
    sca->record_sample(plab_start, plab_word_size);
  }
}

bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) const {
  return new_obj->is_objArray() &&
         arrayOop(new_obj)->length() > ParGCArrayScanChunk &&
         new_obj != old_obj;
}

void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
  assert(old->is_objArray(), "must be obj array");
  assert(old->is_forwarded(), "must be forwarded");
  assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
  assert(!old_gen()->is_in(old), "must be in young generation.");

  objArrayOop obj = objArrayOop(old->forwardee());
  // Process ParGCArrayScanChunk elements now
  // and push the remainder back onto queue
  int start = arrayOop(old)->length();
  int end = obj->length();
  int remainder = end - start;
  assert(start <= end, "just checking");
  if (remainder > 2 * ParGCArrayScanChunk) {
    // Test above combines last partial chunk with a full chunk
    end = start + ParGCArrayScanChunk;
    arrayOop(old)->set_length(end);
    // Push remainder.
    bool ok = work_queue()->push(old);
    assert(ok, "just popped, push must be okay");
  } else {
    // Restore length so that it can be used if there
    // is a promotion failure and forwarding pointers
    // must be removed.
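    // (i.e., the true length, taken from the forwardee, is written back
    // into the from-space copy so promotion-failure cleanup sees a
    // well-formed object)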
    arrayOop(old)->set_length(end);
  }

  // process our set of indices (include header in first chunk)
  // should make sure end is even (aligned to HeapWord in case of compressed oops)
  if ((HeapWord *)obj < young_old_boundary()) {
    // object is in to_space
    obj->oop_iterate_range(&_to_space_closure, start, end);
  } else {
    // object is in old generation
    obj->oop_iterate_range(&_old_gen_closure, start, end);
  }
}


void ParScanThreadState::trim_queues(int max_size) {
  ObjToScanQueue* queue = work_queue();
  do {
    while (queue->size() > (juint)max_size) {
      oop obj_to_scan;
      if (queue->pop_local(obj_to_scan)) {
        if ((HeapWord *)obj_to_scan < young_old_boundary()) {
          if (obj_to_scan->is_objArray() &&
              obj_to_scan->is_forwarded() &&
              obj_to_scan->forwardee() != obj_to_scan) {
            scan_partial_array_and_push_remainder(obj_to_scan);
          } else {
            // object is in to_space
            obj_to_scan->oop_iterate(&_to_space_closure);
          }
        } else {
          // object is in old generation
          obj_to_scan->oop_iterate(&_old_gen_closure);
        }
      }
    }
    // For the case of compressed oops, we have a private, non-shared
    // overflow stack, so we eagerly drain it so as to more evenly
    // distribute load early. Note: this may be good to do in
    // general rather than delay for the final stealing phase.
    // If applicable, we'll transfer a set of objects over to our
    // work queue, allowing them to be stolen and draining our
    // private overflow stack.
  } while (ParGCTrimOverflow && young_gen()->take_from_overflow_list(this));
}

bool ParScanThreadState::take_from_overflow_stack() {
  assert(ParGCUseLocalOverflow, "Else should not call");
  assert(young_gen()->overflow_list() == NULL, "Error");
  ObjToScanQueue* queue = work_queue();
  Stack<oop, mtGC>* const of_stack = overflow_stack();
  const size_t num_overflow_elems = of_stack->size();
  const size_t space_available = queue->max_elems() - queue->size();
  const size_t num_take_elems = MIN3(space_available / 4,
                                     ParGCDesiredObjsFromOverflowList,
                                     num_overflow_elems);
  // Transfer the most recent num_take_elems from the overflow
  // stack to our work queue.
  for (size_t i = 0; i != num_take_elems; i++) {
    oop cur = of_stack->pop();
    oop obj_to_push = cur->forwardee();
    assert(Universe::heap()->is_in_reserved(cur), "Should be in heap");
    assert(!old_gen()->is_in_reserved(cur), "Should be in young gen");
    assert(Universe::heap()->is_in_reserved(obj_to_push), "Should be in heap");
    if (should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = queue->push(obj_to_push);
    assert(ok, "Should have succeeded");
  }
  assert(young_gen()->overflow_list() == NULL, "Error");
  return num_take_elems > 0; // was something transferred?
}

void ParScanThreadState::push_on_overflow_stack(oop p) {
  assert(ParGCUseLocalOverflow, "Else should not call");
  overflow_stack()->push(p);
  assert(young_gen()->overflow_list() == NULL, "Error");
}

HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {

  // Otherwise, if the object is small enough, try to reallocate the
  // buffer.
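  // In outline: if the request would waste less than ParallelGCBufferWastePct
  // percent of a full PLAB, retire the current buffer and carve a fresh one
  // out of to-space (shrinking the request to whatever contiguous space
  // remains, down to ParGCAllocBuffer::min_size()); otherwise the object is
  // too large to be worth a new PLAB and is allocated directly from to-space.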
  HeapWord* obj = NULL;
  if (!_to_space_full) {
    ParGCAllocBuffer* const plab = to_space_alloc_buffer();
    Space*            const sp   = to_space();
    if (word_sz * 100 <
        ParallelGCBufferWastePct * plab->word_sz()) {
      // Is small enough; abandon this buffer and start a new one.
      plab->retire(false, false);
      size_t buf_size = plab->word_sz();
      HeapWord* buf_space = sp->par_allocate(buf_size);
      if (buf_space == NULL) {
        const size_t min_bytes =
          ParGCAllocBuffer::min_size() << LogHeapWordSize;
        size_t free_bytes = sp->free();
        while (buf_space == NULL && free_bytes >= min_bytes) {
          buf_size = free_bytes >> LogHeapWordSize;
          assert(buf_size == (size_t)align_object_size(buf_size),
                 "Invariant");
          buf_space  = sp->par_allocate(buf_size);
          free_bytes = sp->free();
        }
      }
      if (buf_space != NULL) {
        plab->set_word_size(buf_size);
        plab->set_buf(buf_space);
        record_survivor_plab(buf_space, buf_size);
        obj = plab->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
        // Note that we cannot compare buf_size < word_sz below
        // because of AlignmentReserve (see ParGCAllocBuffer::allocate()).
        assert(obj != NULL || plab->words_remaining() < word_sz,
               "Else should have been able to allocate");
        // It's conceivable that we may be able to use the
        // buffer we just grabbed for subsequent small requests
        // even if not for this one.
      } else {
        // We're used up.
        _to_space_full = true;
      }

    } else {
      // Too large; allocate the object individually.
      obj = sp->par_allocate(word_sz);
    }
  }
  return obj;
}


void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj,
                                                size_t word_sz) {
  // Is the alloc in the current alloc buffer?
  if (to_space_alloc_buffer()->contains(obj)) {
    assert(to_space_alloc_buffer()->contains(obj + word_sz - 1),
           "Should contain whole object.");
    to_space_alloc_buffer()->undo_allocation(obj, word_sz);
  } else {
    CollectedHeap::fill_with_object(obj, word_sz);
  }
}

void ParScanThreadState::print_promotion_failure_size() {
  if (_promotion_failed_info.has_failed() && PrintPromotionFailure) {
    gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ",
                        _thread_num, _promotion_failed_info.first_size());
  }
}

class ParScanThreadStateSet: private ResourceArray {
public:
  // Initializes states for the specified number of threads;
  ParScanThreadStateSet(int                     num_threads,
                        Space&                  to_space,
                        ParNewGeneration&       gen,
                        Generation&             old_gen,
                        ObjToScanQueueSet&      queue_set,
                        Stack<oop, mtGC>*       overflow_stacks_,
                        size_t                  desired_plab_sz,
                        ParallelTaskTerminator& term);

  ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }

  inline ParScanThreadState& thread_state(int i);

  void trace_promotion_failed(YoungGCTracer& gc_tracer);
  void reset(int active_workers, bool promotion_failed);
  void flush();

#if TASKQUEUE_STATS
  static void
    print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_termination_stats(outputStream* const st = gclog_or_tty);
  static void
    print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_taskqueue_stats(outputStream* const st = gclog_or_tty);
  void reset_stats();
#endif // TASKQUEUE_STATS

private:
  ParallelTaskTerminator& _term;
  ParNewGeneration&       _gen;
  Generation&             _next_gen;
public:
  bool is_valid(int id) const
    { return id < length(); }
  ParallelTaskTerminator* terminator() { return &_term; }
};


ParScanThreadStateSet::ParScanThreadStateSet(
  int num_threads, Space& to_space, ParNewGeneration& gen,
  Generation& old_gen, ObjToScanQueueSet& queue_set,
  Stack<oop, mtGC>* overflow_stacks,
  size_t desired_plab_sz, ParallelTaskTerminator& term)
  : ResourceArray(sizeof(ParScanThreadState), num_threads),
    _gen(gen), _next_gen(old_gen), _term(term)
{
  assert(num_threads > 0, "sanity check!");
  assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
         "overflow_stack allocation mismatch");
  // Initialize states.
  for (int i = 0; i < num_threads; ++i) {
    new ((ParScanThreadState*)_data + i)
        ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
                           overflow_stacks, desired_plab_sz, term);
  }
}

inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
{
  assert(i >= 0 && i < length(), "sanity check!");
  return ((ParScanThreadState*)_data)[i];
}

void ParScanThreadStateSet::trace_promotion_failed(YoungGCTracer& gc_tracer) {
  for (int i = 0; i < length(); ++i) {
    if (thread_state(i).promotion_failed()) {
      gc_tracer.report_promotion_failed(thread_state(i).promotion_failed_info());
      thread_state(i).promotion_failed_info().reset();
    }
  }
}

void ParScanThreadStateSet::reset(int active_threads, bool promotion_failed)
{
  _term.reset_for_reuse(active_threads);
  if (promotion_failed) {
    for (int i = 0; i < length(); ++i) {
      thread_state(i).print_promotion_failure_size();
    }
  }
}

#if TASKQUEUE_STATS
void
ParScanThreadState::reset_stats()
{
  taskqueue_stats().reset();
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
}

void ParScanThreadStateSet::reset_stats()
{
  for (int i = 0; i < length(); ++i) {
    thread_state(i).reset_stats();
  }
}

void
ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st)
{
  st->print_raw_cr("GC Termination Stats");
  st->print_raw_cr("     elapsed  --strong roots-- "
                   "-------termination-------");
  st->print_raw_cr("thr     ms        ms       %   "
                   "    ms       %   attempts");
  st->print_raw_cr("--- --------- --------- ------ "
                   "--------- ------ --------");
}

void ParScanThreadStateSet::print_termination_stats(outputStream* const st)
{
  print_termination_stats_hdr(st);

  for (int i = 0; i < length(); ++i) {
    const ParScanThreadState & pss = thread_state(i);
    const double elapsed_ms = pss.elapsed_time() * 1000.0;
    const double s_roots_ms = pss.strong_roots_time() * 1000.0;
    const double term_ms = pss.term_time() * 1000.0;
    st->print_cr("%3d %9.2f %9.2f %6.2f "
                 "%9.2f %6.2f " SIZE_FORMAT_W(8),
                 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
                 term_ms, term_ms * 100 / elapsed_ms, pss.term_attempts());
  }
}

// Print stats related to work queue activity.
void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st)
{
  st->print_raw_cr("GC Task Stats");
  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
}

void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st)
{
  print_taskqueue_stats_hdr(st);

  TaskQueueStats totals;
  for (int i = 0; i < length(); ++i) {
    const ParScanThreadState & pss = thread_state(i);
    const TaskQueueStats & stats = pss.taskqueue_stats();
    st->print("%3d ", i); stats.print(st); st->cr();
    totals += stats;

    if (pss.overflow_refills() > 0) {
      st->print_cr("    " SIZE_FORMAT_W(10) " overflow refills    "
                   SIZE_FORMAT_W(10) " overflow objects",
                   pss.overflow_refills(), pss.overflow_refill_objs());
    }
  }
  st->print("tot "); totals.print(st); st->cr();

  DEBUG_ONLY(totals.verify());
}
#endif // TASKQUEUE_STATS

void ParScanThreadStateSet::flush()
{
  // Work in this loop should be kept as lightweight as
  // possible since this might otherwise become a bottleneck
  // to scaling. Should we add heavy-weight work into this
  // loop, consider parallelizing the loop into the worker threads.
  for (int i = 0; i < length(); ++i) {
    ParScanThreadState& par_scan_state = thread_state(i);

    // Flush stats related to To-space PLAB activity and
    // retire the last buffer.
    par_scan_state.to_space_alloc_buffer()->
      flush_stats_and_retire(_gen.plab_stats(),
                             true /* end_of_gc */,
                             false /* retain */);

    // Every thread has its own age table. We need to merge
    // them all into one.
    ageTable *local_table = par_scan_state.age_table();
    _gen.age_table()->merge(local_table);

    // Inform old gen that we're done.
    _next_gen.par_promote_alloc_done(i);
    _next_gen.par_oop_since_save_marks_iterate_done(i);
  }

  if (UseConcMarkSweepGC && ParallelGCThreads > 0) {
    // We need to call this even when ResizeOldPLAB is disabled
    // so as to avoid breaking some asserts. While we may be able
    // to avoid this by reorganizing the code a bit, I am loath
    // to do that unless we find cases where ergo leads to bad
    // performance.
    CFLS_LAB::compute_desired_plab_size();
  }
}

ParScanClosure::ParScanClosure(ParNewGeneration* g,
                               ParScanThreadState* par_scan_state) :
  OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ParScanWithBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, false); }
void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }

void ParScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, false); }
void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }

void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, true); }
void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }

void ParRootScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, true); }
void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }

ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
                                             ParScanThreadState* par_scan_state)
  : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
{}

void ParScanWeakRefClosure::do_oop(oop* p)       { ParScanWeakRefClosure::do_oop_work(p); }
void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }

#ifdef WIN32
#pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */
#endif

ParEvacuateFollowersClosure::ParEvacuateFollowersClosure(
    ParScanThreadState* par_scan_state_,
    ParScanWithoutBarrierClosure* to_space_closure_,
    ParScanWithBarrierClosure* old_gen_closure_,
    ParRootScanWithoutBarrierClosure* to_space_root_closure_,
    ParNewGeneration* par_gen_,
    ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_,
    ObjToScanQueueSet* task_queues_,
    ParallelTaskTerminator* terminator_) :

    _par_scan_state(par_scan_state_),
    _to_space_closure(to_space_closure_),
    _old_gen_closure(old_gen_closure_),
    _to_space_root_closure(to_space_root_closure_),
    _old_gen_root_closure(old_gen_root_closure_),
    _par_gen(par_gen_),
    _task_queues(task_queues_),
    _terminator(terminator_)
{}

void ParEvacuateFollowersClosure::do_void() {
  ObjToScanQueue* work_q = par_scan_state()->work_queue();

  while (true) {

    // Scan to-space and old-gen objs until we run out of both.
    oop obj_to_scan;
    par_scan_state()->trim_queues(0);

    // We have no local work, attempt to steal from other threads.

    // attempt to steal work from promoted.
    if (task_queues()->steal(par_scan_state()->thread_num(),
                             par_scan_state()->hash_seed(),
                             obj_to_scan)) {
      bool res = work_q->push(obj_to_scan);
      assert(res, "Empty queue should have room for a push.");

      // if successful, goto Start.
      continue;

      // try global overflow list.
    } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
      continue;
    }

    // Otherwise, offer termination.
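    // Note: offer_termination() returns true only once every worker in the
    // gang has offered termination; it returns false early if more work
    // shows up, in which case we loop back and resume scanning/stealing.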
    par_scan_state()->start_term_time();
    if (terminator()->offer_termination()) break;
    par_scan_state()->end_term_time();
  }
  assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
         "Broken overflow list?");
  // Finish the last termination pause.
  par_scan_state()->end_term_time();
}

ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* next_gen,
                             HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) :
    AbstractGangTask("ParNewGeneration collection"),
    _gen(gen), _next_gen(next_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
  {}

// Reset the terminator for the given number of
// active threads.
void ParNewGenTask::set_for_termination(int active_workers) {
  _state_set->reset(active_workers, _gen->promotion_failed());
  // Should the heap be passed in?  There's only 1 for now so
  // grab it instead.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->set_n_termination(active_workers);
}

void ParNewGenTask::work(uint worker_id) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  // Since this is being done in a separate thread, need new resource
  // and handle marks.
  ResourceMark rm;
  HandleMark hm;
  // We would need multiple old-gen queues otherwise.
  assert(gch->n_gens() == 2, "Par young collection currently only works with one older gen.");

  Generation* old_gen = gch->next_gen(_gen);

  ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
  assert(_state_set->is_valid(worker_id), "Should not have been called");

  par_scan_state.set_young_old_boundary(_young_old_boundary);

  KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),
                                      gch->rem_set()->klass_rem_set());
  CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
                                           &par_scan_state.to_space_root_closure(),
                                           false);

  par_scan_state.start_strong_roots();
  gch->gen_process_roots(_gen->level(),
                         true,  // Process younger gens, if any,
                                // as strong roots.
                         false, // no scope; this is parallel code
                         SharedHeap::SO_ScavengeCodeCache,
                         GenCollectedHeap::StrongAndWeakRoots,
                         &par_scan_state.to_space_root_closure(),
                         &par_scan_state.older_gen_closure(),
                         &cld_scan_closure);

  par_scan_state.end_strong_roots();

  // "evacuate followers".
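  // Each worker now drains its own queue, steals from its peers, and
  // refills from the global overflow list until the terminator reports
  // that all workers are done (see ParEvacuateFollowersClosure::do_void()
  // above).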
  par_scan_state.evacuate_followers_closure().do_void();
}

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParNewGeneration::
ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
  : DefNewGeneration(rs, initial_byte_size, level, "PCopy"),
  _overflow_list(NULL),
  _is_alive_closure(this),
  _plab_stats(YoungPLABSize, PLABWeight)
{
  NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
  NOT_PRODUCT(_num_par_pushes = 0;)
  _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
  guarantee(_task_queues != NULL, "task_queues allocation failure.");

  for (uint i1 = 0; i1 < ParallelGCThreads; i1++) {
    ObjToScanQueue *q = new ObjToScanQueue();
    guarantee(q != NULL, "work_queue Allocation failure.");
    _task_queues->register_queue(i1, q);
  }

  for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
    _task_queues->queue(i2)->initialize();

  _overflow_stacks = NULL;
  if (ParGCUseLocalOverflow) {

    // typedef to workaround NEW_C_HEAP_ARRAY macro, which can not deal
    // with ','
    typedef Stack<oop, mtGC> GCOopStack;

    _overflow_stacks = NEW_C_HEAP_ARRAY(GCOopStack, ParallelGCThreads, mtGC);
    for (size_t i = 0; i < ParallelGCThreads; ++i) {
      new (_overflow_stacks + i) Stack<oop, mtGC>();
    }
  }

  if (UsePerfData) {
    EXCEPTION_MARK;
    ResourceMark rm;

    const char* cname =
         PerfDataManager::counter_name(_gen_counters->name_space(), "threads");
    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None,
                                     ParallelGCThreads, CHECK);
  }
}
#ifdef _MSC_VER
#pragma warning( pop )
#endif

// ParNewGeneration::
ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}

template <class T>
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _par_cl->do_oop_nv(p);

  if (Universe::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(oop* p)       { ParKeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(narrowOop* p) { ParKeepAliveClosure::do_oop_work(p); }

// ParNewGeneration::
KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {}

template <class T>
void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _cl->do_oop_nv(p);

  if (Universe::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/KeepAliveClosure::do_oop(oop* p)       { KeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); }

template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if ((HeapWord*)obj < _boundary) {
      assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
      oop new_obj = obj->is_forwarded()
                      ? obj->forwardee()
                      : _g->DefNewGeneration::copy_to_survivor_space(obj);
      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
    }
    if (_gc_barrier) {
      // If p points to a younger generation, mark the card.
      if ((HeapWord*)obj < _gen_boundary) {
        _rs->write_ref_field_gc_par(p, obj);
      }
    }
  }
}

void ScanClosureWithParBarrier::do_oop(oop* p)       { ScanClosureWithParBarrier::do_oop_work(p); }
void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }

class ParNewRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
public:
  ParNewRefProcTaskProxy(ProcessTask& task, ParNewGeneration& gen,
                         Generation& next_gen,
                         HeapWord* young_old_boundary,
                         ParScanThreadStateSet& state_set);

private:
  virtual void work(uint worker_id);
  virtual void set_for_termination(int active_workers) {
    _state_set.terminator()->reset_for_reuse(active_workers);
  }
private:
  ParNewGeneration&      _gen;
  ProcessTask&           _task;
  Generation&            _next_gen;
  HeapWord*              _young_old_boundary;
  ParScanThreadStateSet& _state_set;
};

ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(
    ProcessTask& task, ParNewGeneration& gen,
    Generation& next_gen,
    HeapWord* young_old_boundary,
    ParScanThreadStateSet& state_set)
  : AbstractGangTask("ParNewGeneration parallel reference processing"),
    _gen(gen),
    _task(task),
    _next_gen(next_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
{
}

void ParNewRefProcTaskProxy::work(uint worker_id)
{
  ResourceMark rm;
  HandleMark hm;
  ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id);
  par_scan_state.set_young_old_boundary(_young_old_boundary);
  _task.work(worker_id, par_scan_state.is_alive_closure(),
             par_scan_state.keep_alive_closure(),
             par_scan_state.evacuate_followers_closure());
}

class ParNewRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _task;

public:
  ParNewRefEnqueueTaskProxy(EnqueueTask& task)
    : AbstractGangTask("ParNewGeneration parallel reference enqueue"),
      _task(task)
  { }

  virtual void work(uint worker_id)
  {
    _task.work(worker_id);
  }
};


void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "not a generational heap");
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  _state_set.reset(workers->active_workers(), _generation.promotion_failed());
  ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(),
                                 _generation.reserved().end(), _state_set);
  workers->run_task(&rp_task);
  _state_set.reset(0 /* bad value in debug if not reset */,
                   _generation.promotion_failed());
}

void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  ParNewRefEnqueueTaskProxy enq_task(task);
  workers->run_task(&enq_task);
}

void ParNewRefProcTaskExecutor::set_single_threaded_mode()
{
  _state_set.flush();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->set_par_threads(0);  // 0 ==> non-parallel.
  gch->save_marks();
}

ScanClosureWithParBarrier::
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
  ScanClosure(g, gc_barrier) {}

EvacuateFollowersClosureGeneral::
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
                                OopsInGenClosure* cur,
                                OopsInGenClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void EvacuateFollowersClosureGeneral::do_void() {
  do {
    // Beware: this call will lead to closure applications via virtual
    // calls.
    _gch->oop_since_save_marks_iterate(_level,
                                       _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}


// A Generation that does parallel young-gen collection.

bool ParNewGeneration::_avoid_promotion_undo = false;

void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set, ParNewTracer& gc_tracer) {
  assert(_promo_failure_scan_stack.is_empty(), "post condition");
  _promo_failure_scan_stack.clear(true); // Clear cached segments.

  remove_forwarding_pointers();
  if (PrintGCDetails) {
    gclog_or_tty->print(" (promotion failed)");
  }
  // All the spaces are in play for mark-sweep.
  swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
  from()->set_next_compaction_space(to());
  gch->set_incremental_collection_failed();
  // Inform the next generation that a promotion failure occurred.
  _next_gen->promotion_failure_occurred();

  // Trace promotion failure in the parallel GC threads
  thread_state_set.trace_promotion_failed(gc_tracer);
  // Single threaded code may have reported promotion failure to the global state
  if (_promotion_failed_info.has_failed()) {
    gc_tracer.report_promotion_failed(_promotion_failed_info);
  }
  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
}

void ParNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start();

  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "not a CMS generational heap");
  AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need workgang for parallel work");
  int active_workers =
      AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
                                              workers->active_workers(),
                                              Threads::number_of_non_daemon_threads());
  workers->set_active_workers(active_workers);
  assert(gch->n_gens() == 2,
         "Par collection currently only works with single older gen.");
  _next_gen = gch->next_gen(this);
  // Do we have to avoid promotion_undo?
  if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) {
    set_avoid_promotion_undo(true);
  }

  // If the next generation is too full to accommodate worst-case promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  ParNewTracer gc_tracer;
  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
  gch->trace_heap_before_gc(&gc_tracer);

  init_assuming_no_promotion_failure();

  if (UseAdaptiveSizePolicy) {
    set_survivor_overflow(false);
    size_policy->minor_collection_begin();
  }

  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, gc_tracer.gc_id());
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  SpecializationStats::clear();

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->save_marks();
  assert(workers != NULL, "Need parallel worker threads.");
  int n_workers = active_workers;

  // Set the correct parallelism (number of queues) in the reference processor
  ref_processor()->set_active_mt_degree(n_workers);

  // Always set the terminator for the active number of workers
  // because only those workers go through the termination protocol.
  ParallelTaskTerminator _term(n_workers, task_queues());
  ParScanThreadStateSet thread_state_set(workers->active_workers(),
                                         *to(), *this, *_next_gen, *task_queues(),
                                         _overflow_stacks, desired_plab_sz(), _term);

  ParNewGenTask tsk(this, _next_gen, reserved().end(), &thread_state_set);
  gch->set_par_threads(n_workers);
  gch->rem_set()->prepare_for_younger_refs_iterate(true);
  // It turns out that even when we're using 1 thread, doing the work in a
  // separate thread causes wide variance in run times.  We can't help this
  // in the multi-threaded case, but we special-case n=1 here to get
  // repeatable measurements of the 1-thread overhead of the parallel code.
  if (n_workers > 1) {
    GenCollectedHeap::StrongRootsScope srs(gch);
    workers->run_task(&tsk);
  } else {
    GenCollectedHeap::StrongRootsScope srs(gch);
    tsk.work(0);
  }
  thread_state_set.reset(0 /* Bad value in debug if not reset */,
                         promotion_failed());

  // Process (weak) reference objects found during scavenge.
  ReferenceProcessor* rp = ref_processor();
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);
  KeepAliveClosure keep_alive(&scan_weak_ref);
  ScanClosure               scan_without_gc_barrier(this, false);
  ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
  set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
  EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
    &scan_without_gc_barrier, &scan_with_gc_barrier);
  rp->setup_policy(clear_all_soft_refs);
  // Can the mt_degree be set later (at run_task() time would be best)?
  rp->set_active_mt_degree(active_workers);
  ReferenceProcessorStats stats;
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, &task_executor,
                                              _gc_timer, gc_tracer.gc_id());
  } else {
    thread_state_set.flush();
    gch->set_par_threads(0);  // 0 ==> non-parallel.
    gch->save_marks();
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, NULL,
                                              _gc_timer, gc_tracer.gc_id());
  }
  gc_tracer.report_gc_reference_stats(stats);
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a minor collection fails to collect
      // sufficient space resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    size_policy->reset_gc_overhead_limit_count();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();
  } else {
    handle_promotion_failed(gch, thread_state_set, gc_tracer);
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());

  if (ResizePLAB) {
    plab_stats()->adjust_desired_plab_sz(n_workers);
  }

  if (PrintGC && !PrintGCDetails) {
    gch->print_heap_change(gch_prev_used);
  }

  TASKQUEUE_STATS_ONLY(if (PrintTerminationStats) thread_state_set.print_termination_stats());
  TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) thread_state_set.print_taskqueue_stats());

  if (UseAdaptiveSizePolicy) {
    size_policy->minor_collection_end(gch->gc_cause());
    size_policy->avg_survived()->sample(from()->used());
  }

  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings and os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  SpecializationStats::print();

  rp->set_enqueuing_is_done(true);
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    rp->enqueue_discovered_references(&task_executor);
  } else {
    rp->enqueue_discovered_references(NULL);
  }
  rp->verify_no_references_recorded();

  gch->trace_heap_after_gc(&gc_tracer);
  gc_tracer.report_tenuring_threshold(tenuring_threshold());

  _gc_timer->register_gc_end();

  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}

static int sum;
void ParNewGeneration::waste_some_time() {
  for (int i = 0; i < 100; i++) {
    sum += i;
  }
}

static const oop ClaimedForwardPtr = cast_to_oop<intptr_t>(0x4);

// Because of concurrency, there are times where an object for which
// "is_forwarded()" is true contains an "interim" forwarding pointer
// value.  Such a value will soon be overwritten with a real value.
// This method requires "obj" to have a forwarding pointer, and waits, if
// necessary for a real one to be inserted, and returns it.

oop ParNewGeneration::real_forwardee(oop obj) {
  oop forward_ptr = obj->forwardee();
  if (forward_ptr != ClaimedForwardPtr) {
    return forward_ptr;
  } else {
    return real_forwardee_slow(obj);
  }
}

oop ParNewGeneration::real_forwardee_slow(oop obj) {
  // Spin-read if it is claimed but not yet written by another thread.
  oop forward_ptr = obj->forwardee();
  while (forward_ptr == ClaimedForwardPtr) {
    waste_some_time();
    assert(obj->is_forwarded(), "precondition");
    forward_ptr = obj->forwardee();
  }
  return forward_ptr;
}

#ifdef ASSERT
bool ParNewGeneration::is_legal_forward_ptr(oop p) {
  return
    (_avoid_promotion_undo && p == ClaimedForwardPtr)
    || Universe::heap()->is_in_reserved(p);
}
#endif

void ParNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    // We should really have separate per-worker stacks, rather
    // than use locking of a common pair of stacks.
    MutexLocker ml(ParGCRareEvent_lock);
    preserve_mark(obj, m);
  }
}

// Multiple GC threads may try to promote an object.  If the object
// is successfully promoted, a forwarding pointer will be installed in
// the object in the young generation.  This method claims the right
// to install the forwarding pointer before it copies the object,
// thus avoiding the need to undo the copy as in
// copy_to_survivor_space_with_undo.

oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo(
        ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
  // In the sequential version, this assert also says that the object is
  // not forwarded.  That might not be the case here.  It is the case that
  // the caller observed it to be not forwarded at some time in the past.
  assert(is_in_reserved(old), "shouldn't be scavenging this oop");

  // The sequential code read "old->age()" below.  That doesn't work here,
  // since the age is in the mark word, and that might be overwritten with
  // a forwarding pointer by a parallel thread.  So we must save the mark
  // word in a local and then analyze it.
  oopDesc dummyOld;
  dummyOld.set_mark(m);
  assert(!dummyOld.is_forwarded(),
         "should not be called with forwarding pointer mark word.");

  oop new_obj = NULL;
  oop forward_ptr;

  // Try allocating obj in to-space (unless too old)
  if (dummyOld.age() < tenuring_threshold()) {
    new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
    if (new_obj == NULL) {
      set_survivor_overflow(true);
    }
  }

  if (new_obj == NULL) {
    // Either to-space is full or we decided to promote
    // try allocating obj tenured

    // Attempt to install a null forwarding pointer (atomically),
    // to claim the right to install the real forwarding pointer.
    forward_ptr = old->forward_to_atomic(ClaimedForwardPtr);
    if (forward_ptr != NULL) {
      // someone else beat us to it.
      return real_forwardee(old);
    }

    new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
                                     old, m, sz);

    if (new_obj == NULL) {
      // promotion failed, forward to self
      _promotion_failed = true;
      new_obj = old;

      preserve_mark_if_necessary(old, m);
      par_scan_state->register_promotion_failure(sz);
    }

    old->forward_to(new_obj);
    forward_ptr = NULL;
  } else {
    // Is in to-space; do copying ourselves.
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
    forward_ptr = old->forward_to_atomic(new_obj);
    // Restore the mark word copied above.
    new_obj->set_mark(m);
    // Increment age if obj still in new generation
    new_obj->incr_age();
    par_scan_state->age_table()->add(new_obj, sz);
  }
  assert(new_obj != NULL, "just checking");

#ifndef PRODUCT
  // This code must come after the CAS test, or it will print incorrect
  // information.
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
       is_in_reserved(new_obj) ? "copying" : "tenuring",
       new_obj->klass()->internal_name(), (void *)old, (void *)new_obj, new_obj->size());
  }
#endif

  if (forward_ptr == NULL) {
    oop obj_to_push = new_obj;
    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
      // Length field used as index of next element to be scanned.
      // Real length can be obtained from real_forwardee()
      arrayOop(old)->set_length(0);
      obj_to_push = old;
      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
             "push forwarded object");
    }
    // Push it on one of the queues of to-be-scanned objects.
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
      // Add stats for overflow pushes.
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("queue overflow!\n");
      }
      push_on_overflow_list(old, par_scan_state);
      TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
    }

    return new_obj;
  }

  // Oops.  Someone beat us to it.  Undo the allocation.  Where did we
  // allocate it?
  if (is_in_reserved(new_obj)) {
    // Must be in to_space.
    assert(to()->is_in_reserved(new_obj), "Checking");
    if (forward_ptr == ClaimedForwardPtr) {
      // Wait to get the real forwarding pointer value.
      forward_ptr = real_forwardee(old);
    }
    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
  }

  return forward_ptr;
}


// Multiple GC threads may try to promote the same object.  If two
// or more GC threads copy the object, only one wins the race to install
// the forwarding pointer.  The other threads have to undo their copy.

oop ParNewGeneration::copy_to_survivor_space_with_undo(
        ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {

  // In the sequential version, this assert also says that the object is
  // not forwarded.  That might not be the case here.  It is the case that
  // the caller observed it to be not forwarded at some time in the past.
  assert(is_in_reserved(old), "shouldn't be scavenging this oop");

  // The sequential code read "old->age()" below.  That doesn't work here,
  // since the age is in the mark word, and that might be overwritten with
  // a forwarding pointer by a parallel thread.  So we must save the mark
  // word here, install it in a local oopDesc, and then analyze it.
  oopDesc dummyOld;
  dummyOld.set_mark(m);
  assert(!dummyOld.is_forwarded(),
         "should not be called with forwarding pointer mark word.");

  bool failed_to_promote = false;
  oop new_obj = NULL;
  oop forward_ptr;

  // Try allocating obj in to-space (unless too old)
  if (dummyOld.age() < tenuring_threshold()) {
    new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
    if (new_obj == NULL) {
      set_survivor_overflow(true);
    }
  }

  if (new_obj == NULL) {
    // Either to-space is full or we decided to promote
    // try allocating obj tenured
    new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
                                     old, m, sz);

    if (new_obj == NULL) {
      // promotion failed, forward to self
      forward_ptr = old->forward_to_atomic(old);
      new_obj = old;

      if (forward_ptr != NULL) {
        return forward_ptr;   // someone else succeeded
      }

      _promotion_failed = true;
      failed_to_promote = true;

      preserve_mark_if_necessary(old, m);
      par_scan_state->register_promotion_failure(sz);
    }
  } else {
    // Is in to-space; do copying ourselves.
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
    // Restore the mark word copied above.
    new_obj->set_mark(m);
    // Increment age if new_obj still in new generation
    new_obj->incr_age();
    par_scan_state->age_table()->add(new_obj, sz);
  }
  assert(new_obj != NULL, "just checking");

#ifndef PRODUCT
  // This code must come after the CAS test, or it will print incorrect
  // information.
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
       is_in_reserved(new_obj) ? "copying" : "tenuring",
       new_obj->klass()->internal_name(), (void *)old, (void *)new_obj, new_obj->size());
  }
#endif

  // Now attempt to install the forwarding pointer (atomically).
  // We have to copy the mark word before overwriting with forwarding
  // ptr, so we can restore it below in the copy.
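  // (If promotion failed, the self-forwarding pointer was already installed
  // atomically above, which is why the CAS is skipped in that case.)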
  if (!failed_to_promote) {
    forward_ptr = old->forward_to_atomic(new_obj);
  }

  if (forward_ptr == NULL) {
    oop obj_to_push = new_obj;
    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
      // Length field used as index of next element to be scanned.
      // Real length can be obtained from real_forwardee()
      arrayOop(old)->set_length(0);
      obj_to_push = old;
      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
             "push forwarded object");
    }
    // Push it on one of the queues of to-be-scanned objects.
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
      // Add stats for overflow pushes.
      push_on_overflow_list(old, par_scan_state);
      TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
    }

    return new_obj;
  }

  // Oops.  Someone beat us to it.  Undo the allocation.  Where did we
  // allocate it?
  if (is_in_reserved(new_obj)) {
    // Must be in to_space.
    assert(to()->is_in_reserved(new_obj), "Checking");
    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
  } else {
    assert(!_avoid_promotion_undo, "Should not be here if avoiding.");
    _next_gen->par_promote_alloc_undo(par_scan_state->thread_num(),
                                      (HeapWord*)new_obj, sz);
  }

  return forward_ptr;
}

#ifndef PRODUCT
// It's OK to call this multi-threaded;  the worst thing
// that can happen is that we'll get a bunch of closely
// spaced simulated overflows, but that's OK, in fact
// probably good as it would exercise the overflow code
// under contention.
bool ParNewGeneration::should_simulate_overflow() {
  if (_overflow_counter-- <= 0) { // just being defensive
    _overflow_counter = ParGCWorkQueueOverflowInterval;
    return true;
  } else {
    return false;
  }
}
#endif

// In case we are using compressed oops, we need to be careful.
// If the object being pushed is an object array, then its length
// field keeps track of the "grey boundary" at which the next
// incremental scan will be done (see ParGCArrayScanChunk).
// When using compressed oops, this length field is kept in the
// lower 32 bits of the erstwhile klass word and cannot be used
// for the overflow chaining pointer (OCP below).  As such the OCP
// would itself need to be compressed into the top 32-bits in this
// case.  Unfortunately, see below, in the event that we have a
// promotion failure, the node to be pushed on the list can be
// outside of the Java heap, so the heap-based pointer compression
// would not work (we would have potential aliasing between C-heap
// and Java-heap pointers).  For this reason, when using compressed
// oops, we simply use a worker-thread-local, non-shared overflow
// list in the form of a growable array, with a slightly different
// overflow stack draining strategy.  If/when we start using fat
// stacks here, we can go back to using (fat) pointer chains
// (although some performance comparisons would be useful since
// single global lists have their own performance disadvantages
// as we were made painfully aware not long ago, see 6786503).
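// A sketch of the (uncompressed-oops) global overflow list manipulated
// below, with each link stored in the pushed object's klass word via
// set_klass_to_list_ptr():
//
//   _overflow_list -> obj1 -> obj2 -> ... -> NULL
//
// BUSY is a sentinel (not a real oop) that marks the list head as
// claimed while a thread trims a prefix off the list.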
#define BUSY (cast_to_oop<intptr_t>(0x1aff1aff))
void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) {
  assert(is_in_reserved(from_space_obj), "Should be from this generation");
  if (ParGCUseLocalOverflow) {
    // In the case of compressed oops, we use a private, not-shared
    // overflow stack.
    par_scan_state->push_on_overflow_stack(from_space_obj);
  } else {
    assert(!UseCompressedOops, "Error");
    // if the object has been forwarded to itself, then we cannot
    // use the klass pointer for the linked list.  Instead we have
    // to allocate an oopDesc in the C-Heap and use that for the linked list.
    // XXX This is horribly inefficient when a promotion failure occurs
    // and should be fixed. XXX FIX ME !!!
#ifndef PRODUCT
    Atomic::inc_ptr(&_num_par_pushes);
    assert(_num_par_pushes > 0, "Tautology");
#endif
    if (from_space_obj->forwardee() == from_space_obj) {
      oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1, mtGC);
      listhead->forward_to(from_space_obj);
      from_space_obj = listhead;
    }
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list;
    do {
      cur_overflow_list = observed_overflow_list;
      if (cur_overflow_list != BUSY) {
        from_space_obj->set_klass_to_list_ptr(cur_overflow_list);
      } else {
        from_space_obj->set_klass_to_list_ptr(NULL);
      }
      observed_overflow_list =
        (oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list);
    } while (cur_overflow_list != observed_overflow_list);
  }
}

bool ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) {
  bool res;

  if (ParGCUseLocalOverflow) {
    res = par_scan_state->take_from_overflow_stack();
  } else {
    assert(!UseCompressedOops, "Error");
    res = take_from_overflow_list_work(par_scan_state);
  }
  return res;
}


// *NOTE*: The overflow list manipulation code here and
// in CMSCollector:: are very similar in shape,
// except that in the CMS case we thread the objects
// directly into the list via their mark word, and do
// not need to deal with special cases below related
// to chunking of object arrays and promotion failure
// handling.
// CR 6797058 has been filed to attempt consolidation of
// the common code.
// Because of the common code, if you make any changes in
// the code below, please check the CMS version to see if
// similar changes might be needed.
// See CMSCollector::par_take_from_overflow_list() for
// more extensive documentation comments.
bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan_state) {
  ObjToScanQueue* work_q = par_scan_state->work_queue();
  // How many to take?
  size_t objsFromOverflow = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
                                 (size_t)ParGCDesiredObjsFromOverflowList);

  assert(!UseCompressedOops, "Error");
  assert(par_scan_state->overflow_stack() == NULL, "Error");
  if (_overflow_list == NULL) return false;

  // Otherwise, there was something there; try claiming the list.
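  // The claim is a single atomic exchange: whoever swaps BUSY into
  // _overflow_list owns the whole chain, and is responsible for writing
  // back either NULL or a remaining suffix when done.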
  oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
  // Trim off a prefix of at most objsFromOverflow items
  Thread* tid = Thread::current();
  size_t spin_count = (size_t)ParallelGCThreads;
  size_t sleep_time_millis = MAX2((size_t)1, objsFromOverflow/100);
  for (size_t spin = 0; prefix == BUSY && spin < spin_count; spin++) {
    // someone grabbed it before we did ...
    // ... we spin for a short while...
    os::sleep(tid, sleep_time_millis, false);
    if (_overflow_list == NULL) {
      // nothing left to take
      return false;
    } else if (_overflow_list != BUSY) {
      // try and grab the prefix
      prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
    }
  }
  if (prefix == NULL || prefix == BUSY) {
    // Nothing to take or waited long enough
    if (prefix == NULL) {
      // Write back the NULL in case we overwrote it with BUSY above
      // and it is still the same value.
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
    return false;
  }
  assert(prefix != NULL && prefix != BUSY, "Error");
  size_t i = 1;
  oop cur = prefix;
  while (i < objsFromOverflow && cur->klass_or_null() != NULL) {
    i++; cur = cur->list_ptr_from_klass();
  }

  // Reattach remaining (suffix) to overflow list
  if (cur->klass_or_null() == NULL) {
    // Write back the NULL in lieu of the BUSY we wrote
    // above, if it is still the same value.
    if (_overflow_list == BUSY) {
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
  } else {
    assert(cur->klass_or_null() != (Klass*)(address)BUSY, "Error");
    oop suffix = cur->list_ptr_from_klass();  // suffix will be put back on global list
    cur->set_klass_to_list_ptr(NULL);         // break off suffix
    // It's possible that the list is still in the empty(busy) state
    // we left it in a short while ago; in that case we may be
    // able to place back the suffix.
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list = observed_overflow_list;
    bool attached = false;
    while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
      observed_overflow_list =
        (oop) Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
      if (cur_overflow_list == observed_overflow_list) {
        attached = true;
        break;
      } else cur_overflow_list = observed_overflow_list;
    }
    if (!attached) {
      // Too bad, someone else got in between; we'll need to do a splice.
      // Find the last item of suffix list
      oop last = suffix;
      while (last->klass_or_null() != NULL) {
        last = last->list_ptr_from_klass();
      }
      // Atomically prepend suffix to current overflow list
      observed_overflow_list = _overflow_list;
      do {
        cur_overflow_list = observed_overflow_list;
        if (cur_overflow_list != BUSY) {
          // Do the splice ...
          last->set_klass_to_list_ptr(cur_overflow_list);
        } else { // cur_overflow_list == BUSY
          last->set_klass_to_list_ptr(NULL);
        }
        observed_overflow_list =
          (oop)Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
      } while (cur_overflow_list != observed_overflow_list);
    }
  }

  // Push objects on prefix list onto this thread's work queue
  assert(prefix != NULL && prefix != BUSY, "program logic");
  cur = prefix;
  ssize_t n = 0;
  while (cur != NULL) {
    oop obj_to_push = cur->forwardee();
    oop next        = cur->list_ptr_from_klass();
    cur->set_klass(obj_to_push->klass());
    // This may be an array object that is self-forwarded. In that case, the list pointer
    // space, cur, is not in the Java heap, but rather in the C-heap and should be freed.
    if (!is_in_reserved(cur)) {
      // This can become a scaling bottleneck when there is work queue overflow coincident
      // with promotion failure.
      oopDesc* f = cur;
      FREE_C_HEAP_ARRAY(oopDesc, f, mtGC);
    } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = work_q->push(obj_to_push);
    assert(ok, "Should have succeeded");
    cur = next;
    n++;
  }
  TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n));
#ifndef PRODUCT
  assert(_num_par_pushes >= n, "Too many pops?");
  Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
#endif
  return true;
}
#undef BUSY

void ParNewGeneration::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor =
      new ReferenceProcessor(_reserved,                  // span
                             ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
                             (int) ParallelGCThreads,    // mt processing degree
                             refs_discovery_is_mt(),     // mt discovery
                             (int) ParallelGCThreads,    // mt discovery degree
                             refs_discovery_is_atomic(), // atomic_discovery
                             NULL);                      // is_alive_non_header
  }
}

const char* ParNewGeneration::name() const {
  return "par new generation";
}