/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
#include "gc_implementation/parNew/parNewGeneration.hpp"
#include "gc_implementation/parNew/parOopClosures.inline.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "gc_implementation/shared/ageTable.hpp"
#include "gc_implementation/shared/copyFailedInfo.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.inline.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/defNewGeneration.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.hpp"
#include "memory/generation.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/workgroup.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParScanThreadState::ParScanThreadState(Space* to_space_,
                                       ParNewGeneration* gen_,
                                       Generation* old_gen_,
                                       int thread_num_,
                                       ObjToScanQueueSet* work_queue_set_,
                                       Stack<oop, mtGC>* overflow_stacks_,
                                       size_t desired_plab_sz_,
                                       ParNewTracer* gc_tracer,
                                       ParallelTaskTerminator& term_) :
  _to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_),
  _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
  _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
  _ageTable(false), // false ==> not the global age table, no perf data.
  _to_space_alloc_buffer(desired_plab_sz_),
  _to_space_closure(gen_, this), _old_gen_closure(gen_, this),
  _to_space_root_closure(gen_, this), _old_gen_root_closure(gen_, this),
  _older_gen_closure(gen_, this),
  _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
                      &_to_space_root_closure, gen_, &_old_gen_root_closure,
                      work_queue_set_, &term_),
  _is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this),
  _keep_alive_closure(&_scan_weak_ref_closure),
  _gc_tracer(gc_tracer),
  _strong_roots_time(0.0), _term_time(0.0)
{
#if TASKQUEUE_STATS
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
#endif // TASKQUEUE_STATS

  _survivor_chunk_array =
    (ChunkArray*) old_gen()->get_data_recorder(thread_num());
  _hash_seed = 17;  // Might want to take time-based random value.
  _start = os::elapsedTime();
  _old_gen_closure.set_generation(old_gen_);
  _old_gen_root_closure.set_generation(old_gen_);
}
#ifdef _MSC_VER
#pragma warning( pop )
#endif

void ParScanThreadState::record_survivor_plab(HeapWord* plab_start,
                                              size_t plab_word_size) {
  ChunkArray* sca = survivor_chunk_array();
  if (sca != NULL) {
    // A non-null SCA implies that we want the PLAB data recorded.
    sca->record_sample(plab_start, plab_word_size);
  }
}

bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) const {
  return new_obj->is_objArray() &&
         arrayOop(new_obj)->length() > ParGCArrayScanChunk &&
         new_obj != old_obj;
}

void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
  assert(old->is_objArray(), "must be obj array");
  assert(old->is_forwarded(), "must be forwarded");
  assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
  assert(!old_gen()->is_in(old), "must be in young generation.");

  objArrayOop obj = objArrayOop(old->forwardee());
  // Process ParGCArrayScanChunk elements now
  // and push the remainder back onto queue
  int start = arrayOop(old)->length();
  int end = obj->length();
  int remainder = end - start;
  assert(start <= end, "just checking");
  if (remainder > 2 * ParGCArrayScanChunk) {
    // Test above combines last partial chunk with a full chunk
    end = start + ParGCArrayScanChunk;
    arrayOop(old)->set_length(end);
    // Push remainder.
    bool ok = work_queue()->push(old);
    assert(ok, "just popped, push must be okay");
  } else {
    // Restore length so that it can be used if there
    // is a promotion failure and forwarding pointers
    // must be removed.
    arrayOop(old)->set_length(end);
  }

  // process our set of indices (include header in first chunk)
  // should make sure end is even (aligned to HeapWord in case of compressed oops)
  if ((HeapWord *)obj < young_old_boundary()) {
    // object is in to_space
    obj->oop_iterate_range(&_to_space_closure, start, end);
  } else {
    // object is in old generation
    obj->oop_iterate_range(&_old_gen_closure, start, end);
  }
}


void ParScanThreadState::trim_queues(int max_size) {
  ObjToScanQueue* queue = work_queue();
  do {
    while (queue->size() > (juint)max_size) {
      oop obj_to_scan;
      if (queue->pop_local(obj_to_scan)) {
        if ((HeapWord *)obj_to_scan < young_old_boundary()) {
          if (obj_to_scan->is_objArray() &&
              obj_to_scan->is_forwarded() &&
              obj_to_scan->forwardee() != obj_to_scan) {
            scan_partial_array_and_push_remainder(obj_to_scan);
          } else {
            // object is in to_space
            obj_to_scan->oop_iterate(&_to_space_closure);
          }
        } else {
          // object is in old generation
          obj_to_scan->oop_iterate(&_old_gen_closure);
        }
      }
    }
    // For the case of compressed oops, we have a private, non-shared
    // overflow stack, so we eagerly drain it so as to more evenly
    // distribute load early. Note: this may be good to do in
    // general rather than delay for the final stealing phase.
    // If applicable, we'll transfer a set of objects over to our
    // work queue, allowing them to be stolen and draining our
    // private overflow stack.
  } while (ParGCTrimOverflow && young_gen()->take_from_overflow_list(this));
}

bool ParScanThreadState::take_from_overflow_stack() {
  assert(ParGCUseLocalOverflow, "Else should not call");
  assert(young_gen()->overflow_list() == NULL, "Error");
  ObjToScanQueue* queue = work_queue();
  Stack<oop, mtGC>* const of_stack = overflow_stack();
  const size_t num_overflow_elems = of_stack->size();
  const size_t space_available = queue->max_elems() - queue->size();
  const size_t num_take_elems = MIN3(space_available / 4,
                                     ParGCDesiredObjsFromOverflowList,
                                     num_overflow_elems);
  // Transfer the most recent num_take_elems from the overflow
  // stack to our work queue.
  for (size_t i = 0; i != num_take_elems; i++) {
    oop cur = of_stack->pop();
    oop obj_to_push = cur->forwardee();
    assert(Universe::heap()->is_in_reserved(cur), "Should be in heap");
    assert(!old_gen()->is_in_reserved(cur), "Should be in young gen");
    assert(Universe::heap()->is_in_reserved(obj_to_push), "Should be in heap");
    if (should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = queue->push(obj_to_push);
    assert(ok, "Should have succeeded");
  }
  assert(young_gen()->overflow_list() == NULL, "Error");
  return num_take_elems > 0; // was something transferred?
}

void ParScanThreadState::push_on_overflow_stack(oop p) {
  assert(ParGCUseLocalOverflow, "Else should not call");
  overflow_stack()->push(p);
  assert(young_gen()->overflow_list() == NULL, "Error");
}

HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz,
                                                     const oop old,
                                                     const uint age) {

  // Otherwise, if the object is small enough, try to reallocate the
  // buffer.
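  // The waste check below admits an object only if it is smaller than
  // ParallelGCBufferWastePct percent of a full PLAB (word_sz * 100 <
  // ParallelGCBufferWastePct * plab->word_sz()), so retiring the current
  // buffer for such a request wastes at most that fraction of its tail.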
  HeapWord* obj = NULL;
  if (!_to_space_full) {
    ParGCAllocBuffer* const plab = to_space_alloc_buffer();
    Space* const sp = to_space();
    if (word_sz * 100 <
        ParallelGCBufferWastePct * plab->word_sz()) {
      // Is small enough; abandon this buffer and start a new one.
      plab->retire(false, false);
      size_t buf_size = plab->word_sz();
      HeapWord* buf_space = sp->par_allocate(buf_size);
      if (buf_space == NULL) {
        const size_t min_bytes =
          ParGCAllocBuffer::min_size() << LogHeapWordSize;
        size_t free_bytes = sp->free();
        while(buf_space == NULL && free_bytes >= min_bytes) {
          buf_size = free_bytes >> LogHeapWordSize;
          assert(buf_size == (size_t)align_object_size(buf_size),
                 "Invariant");
          buf_space = sp->par_allocate(buf_size);
          free_bytes = sp->free();
        }
      }
      if (buf_space != NULL) {
        plab->set_word_size(buf_size);
        plab->set_buf(buf_space);
        record_survivor_plab(buf_space, buf_size);
        gc_tracer()->report_promotion_in_new_plab_event(old, word_sz, age, false,
                                                        buf_size);
        obj = plab->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
        // Note that we cannot compare buf_size < word_sz below
        // because of AlignmentReserve (see ParGCAllocBuffer::allocate()).
        assert(obj != NULL || plab->words_remaining() < word_sz,
               "Else should have been able to allocate");
        // It's conceivable that we may be able to use the
        // buffer we just grabbed for subsequent small requests
        // even if not for this one.
      } else {
        // We're used up.
        _to_space_full = true;
      }

    } else {
      // Too large; allocate the object individually.
      gc_tracer()->report_promotion_outside_plab_event(old, word_sz, age, false);
      obj = sp->par_allocate(word_sz);
    }
  }
  return obj;
}


void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj,
                                                size_t word_sz) {
  // Is the alloc in the current alloc buffer?
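  // If it is, let the buffer undo the allocation; otherwise the hole must
  // be plugged with a filler object so that to-space stays parseable.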
  if (to_space_alloc_buffer()->contains(obj)) {
    assert(to_space_alloc_buffer()->contains(obj + word_sz - 1),
           "Should contain whole object.");
    to_space_alloc_buffer()->undo_allocation(obj, word_sz);
  } else {
    CollectedHeap::fill_with_object(obj, word_sz);
  }
}

void ParScanThreadState::print_promotion_failure_size() {
  if (_promotion_failed_info.has_failed() && PrintPromotionFailure) {
    gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ",
                        _thread_num, _promotion_failed_info.first_size());
  }
}

class ParScanThreadStateSet: private ResourceArray {
public:
  // Initializes states for the specified number of threads;
  ParScanThreadStateSet(int num_threads,
                        Space& to_space,
                        ParNewGeneration& gen,
                        Generation& old_gen,
                        ObjToScanQueueSet& queue_set,
                        Stack<oop, mtGC>* overflow_stacks_,
                        size_t desired_plab_sz,
                        ParNewTracer* gc_tracer,
                        ParallelTaskTerminator& term);

  ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }

  inline ParScanThreadState& thread_state(int i);

  void trace_promotion_failed(YoungGCTracer& gc_tracer);
  void reset(int active_workers, bool promotion_failed);
  void flush();

#if TASKQUEUE_STATS
  static void
    print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_termination_stats(outputStream* const st = gclog_or_tty);
  static void
    print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_taskqueue_stats(outputStream* const st = gclog_or_tty);
  void reset_stats();
#endif // TASKQUEUE_STATS

private:
  ParallelTaskTerminator& _term;
  ParNewGeneration&       _gen;
  Generation&             _next_gen;
public:
  bool is_valid(int id) const { return id < length(); }
  ParallelTaskTerminator* terminator() { return &_term; }
};


ParScanThreadStateSet::ParScanThreadStateSet(
  int num_threads, Space& to_space, ParNewGeneration& gen,
  Generation& old_gen, ObjToScanQueueSet& queue_set,
  Stack<oop, mtGC>* overflow_stacks,
  size_t desired_plab_sz, ParNewTracer* gc_tracer,
  ParallelTaskTerminator& term)
  : ResourceArray(sizeof(ParScanThreadState), num_threads),
    _gen(gen), _next_gen(old_gen), _term(term)
{
  assert(num_threads > 0, "sanity check!");
  assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
         "overflow_stack allocation mismatch");
  // Initialize states.
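  // Each per-thread state is constructed with placement new into the
  // storage obtained by the ResourceArray base class; thread i is wired to
  // queue i of queue_set and, when local overflow is in use, to overflow
  // stack i.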
  for (int i = 0; i < num_threads; ++i) {
    new ((ParScanThreadState*)_data + i)
        ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
                           overflow_stacks, desired_plab_sz, gc_tracer, term);
  }
}

inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
{
  assert(i >= 0 && i < length(), "sanity check!");
  return ((ParScanThreadState*)_data)[i];
}

void ParScanThreadStateSet::trace_promotion_failed(YoungGCTracer& gc_tracer) {
  for (int i = 0; i < length(); ++i) {
    if (thread_state(i).promotion_failed()) {
      gc_tracer.report_promotion_failed(thread_state(i).promotion_failed_info());
      thread_state(i).promotion_failed_info().reset();
    }
  }
}

void ParScanThreadStateSet::reset(int active_threads, bool promotion_failed)
{
  _term.reset_for_reuse(active_threads);
  if (promotion_failed) {
    for (int i = 0; i < length(); ++i) {
      thread_state(i).print_promotion_failure_size();
    }
  }
}

#if TASKQUEUE_STATS
void
ParScanThreadState::reset_stats()
{
  taskqueue_stats().reset();
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
}

void ParScanThreadStateSet::reset_stats()
{
  for (int i = 0; i < length(); ++i) {
    thread_state(i).reset_stats();
  }
}

void
ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st)
{
  st->print_raw_cr("GC Termination Stats");
  st->print_raw_cr("     elapsed  --strong roots-- "
                   "-------termination-------");
  st->print_raw_cr("thr     ms        ms       % "
                   "    ms       %   attempts");
  st->print_raw_cr("--- --------- --------- ------ "
                   "--------- ------ --------");
}

void ParScanThreadStateSet::print_termination_stats(outputStream* const st)
{
  print_termination_stats_hdr(st);

  for (int i = 0; i < length(); ++i) {
    const ParScanThreadState & pss = thread_state(i);
    const double elapsed_ms = pss.elapsed_time() * 1000.0;
    const double s_roots_ms = pss.strong_roots_time() * 1000.0;
    const double term_ms = pss.term_time() * 1000.0;
    st->print_cr("%3d %9.2f %9.2f %6.2f "
                 "%9.2f %6.2f " SIZE_FORMAT_W(8),
                 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
                 term_ms, term_ms * 100 / elapsed_ms, pss.term_attempts());
  }
}

// Print stats related to work queue activity.
void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st)
{
  st->print_raw_cr("GC Task Stats");
  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
}

void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st)
{
  print_taskqueue_stats_hdr(st);

  TaskQueueStats totals;
  for (int i = 0; i < length(); ++i) {
    const ParScanThreadState & pss = thread_state(i);
    const TaskQueueStats & stats = pss.taskqueue_stats();
    st->print("%3d ", i); stats.print(st); st->cr();
    totals += stats;

    if (pss.overflow_refills() > 0) {
      st->print_cr("    " SIZE_FORMAT_W(10) " overflow refills    "
                   SIZE_FORMAT_W(10) " overflow objects",
                   pss.overflow_refills(), pss.overflow_refill_objs());
    }
  }
  st->print("tot "); totals.print(st); st->cr();

  DEBUG_ONLY(totals.verify());
}
#endif // TASKQUEUE_STATS

void ParScanThreadStateSet::flush()
{
  // Work in this loop should be kept as lightweight as
  // possible since this might otherwise become a bottleneck
  // to scaling. Should we add heavy-weight work into this
  // loop, consider parallelizing the loop into the worker threads.
  for (int i = 0; i < length(); ++i) {
    ParScanThreadState& par_scan_state = thread_state(i);

    // Flush stats related to To-space PLAB activity and
    // retire the last buffer.
    par_scan_state.to_space_alloc_buffer()->
      flush_stats_and_retire(_gen.plab_stats(),
                             true /* end_of_gc */,
                             false /* retain */);

    // Every thread has its own age table. We need to merge
    // them all into one.
    ageTable *local_table = par_scan_state.age_table();
    _gen.age_table()->merge(local_table);

    // Inform old gen that we're done.
    _next_gen.par_promote_alloc_done(i);
    _next_gen.par_oop_since_save_marks_iterate_done(i);
  }

  if (UseConcMarkSweepGC) {
    // We need to call this even when ResizeOldPLAB is disabled
    // so as to avoid breaking some asserts. While we may be able
    // to avoid this by reorganizing the code a bit, I am loath
    // to do that unless we find cases where ergo leads to bad
    // performance.
    CFLS_LAB::compute_desired_plab_size();
  }
}

ParScanClosure::ParScanClosure(ParNewGeneration* g,
                               ParScanThreadState* par_scan_state) :
  OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ParScanWithBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, false); }
void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }

void ParScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, false); }
void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }

void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, true); }
void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }

void ParRootScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, true); }
void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }

ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
                                             ParScanThreadState* par_scan_state)
  : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
{}

void ParScanWeakRefClosure::do_oop(oop* p)       { ParScanWeakRefClosure::do_oop_work(p); }
void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }

#ifdef WIN32
#pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */
#endif

ParEvacuateFollowersClosure::ParEvacuateFollowersClosure(
    ParScanThreadState* par_scan_state_,
    ParScanWithoutBarrierClosure* to_space_closure_,
    ParScanWithBarrierClosure* old_gen_closure_,
    ParRootScanWithoutBarrierClosure* to_space_root_closure_,
    ParNewGeneration* par_gen_,
    ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_,
    ObjToScanQueueSet* task_queues_,
    ParallelTaskTerminator* terminator_) :

    _par_scan_state(par_scan_state_),
    _to_space_closure(to_space_closure_),
    _old_gen_closure(old_gen_closure_),
    _to_space_root_closure(to_space_root_closure_),
    _old_gen_root_closure(old_gen_root_closure_),
    _par_gen(par_gen_),
    _task_queues(task_queues_),
    _terminator(terminator_)
{}

void ParEvacuateFollowersClosure::do_void() {
  ObjToScanQueue* work_q = par_scan_state()->work_queue();

  while (true) {

    // Scan to-space and old-gen objs until we run out of both.
    oop obj_to_scan;
    par_scan_state()->trim_queues(0);

    // We have no local work, attempt to steal from other threads.

    // attempt to steal work from promoted.
    if (task_queues()->steal(par_scan_state()->thread_num(),
                             par_scan_state()->hash_seed(),
                             obj_to_scan)) {
      bool res = work_q->push(obj_to_scan);
      assert(res, "Empty queue should have room for a push.");

      // if successful, goto Start.
      continue;

      // try global overflow list.
    } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
      continue;
    }

    // Otherwise, offer termination.
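    // Each idle worker parks in offer_termination(); it should return true
    // only once every worker has offered termination, and return false
    // early if work reappears, in which case we loop back and try stealing
    // again (see ParallelTaskTerminator::offer_termination).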
    par_scan_state()->start_term_time();
    if (terminator()->offer_termination()) break;
    par_scan_state()->end_term_time();
  }
  assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
         "Broken overflow list?");
  // Finish the last termination pause.
  par_scan_state()->end_term_time();
}

ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* next_gen,
                             HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) :
    AbstractGangTask("ParNewGeneration collection"),
    _gen(gen), _next_gen(next_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
  {}

// Reset the terminator for the given number of
// active threads.
void ParNewGenTask::set_for_termination(int active_workers) {
  _state_set->reset(active_workers, _gen->promotion_failed());
  // Should the heap be passed in?  There's only 1 for now so
  // grab it instead.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->set_n_termination(active_workers);
}

void ParNewGenTask::work(uint worker_id) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  // Since this is being done in a separate thread, need new resource
  // and handle marks.
  ResourceMark rm;
  HandleMark hm;
  // We would need multiple old-gen queues otherwise.
  assert(gch->n_gens() == 2, "Par young collection currently only works with one older gen.");

  Generation* old_gen = gch->next_gen(_gen);

  ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
  assert(_state_set->is_valid(worker_id), "Should not have been called");

  par_scan_state.set_young_old_boundary(_young_old_boundary);

  KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),
                                      gch->rem_set()->klass_rem_set());
  CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
                                           &par_scan_state.to_space_root_closure(),
                                           false);

  par_scan_state.start_strong_roots();
  gch->gen_process_roots(_gen->level(),
                         true,  // Process younger gens, if any,
                                // as strong roots.
                         false, // no scope; this is parallel code
                         SharedHeap::SO_ScavengeCodeCache,
                         GenCollectedHeap::StrongAndWeakRoots,
                         &par_scan_state.to_space_root_closure(),
                         &par_scan_state.older_gen_closure(),
                         &cld_scan_closure);

  par_scan_state.end_strong_roots();

  // "evacuate followers".
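  // Transitively scan and copy everything reachable from the roots queued
  // above; do_void() returns only after the work queues have been drained
  // and every worker has passed the termination protocol.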
  par_scan_state.evacuate_followers_closure().do_void();
}

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParNewGeneration::
ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
  : DefNewGeneration(rs, initial_byte_size, level, "PCopy"),
  _overflow_list(NULL),
  _is_alive_closure(this),
  _plab_stats(YoungPLABSize, PLABWeight)
{
  NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
  NOT_PRODUCT(_num_par_pushes = 0;)
  _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
  guarantee(_task_queues != NULL, "task_queues allocation failure.");

  for (uint i1 = 0; i1 < ParallelGCThreads; i1++) {
    ObjToScanQueue *q = new ObjToScanQueue();
    guarantee(q != NULL, "work_queue Allocation failure.");
    _task_queues->register_queue(i1, q);
  }

  for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
    _task_queues->queue(i2)->initialize();

  _overflow_stacks = NULL;
  if (ParGCUseLocalOverflow) {

    // typedef to workaround NEW_C_HEAP_ARRAY macro, which can not deal
    // with ','
    typedef Stack<oop, mtGC> GCOopStack;

    _overflow_stacks = NEW_C_HEAP_ARRAY(GCOopStack, ParallelGCThreads, mtGC);
    for (size_t i = 0; i < ParallelGCThreads; ++i) {
      new (_overflow_stacks + i) Stack<oop, mtGC>();
    }
  }

  if (UsePerfData) {
    EXCEPTION_MARK;
    ResourceMark rm;

    const char* cname =
         PerfDataManager::counter_name(_gen_counters->name_space(), "threads");
    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None,
                                     ParallelGCThreads, CHECK);
  }
}
#ifdef _MSC_VER
#pragma warning( pop )
#endif

// ParNewGeneration::
ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}

template <class T>
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _par_cl->do_oop_nv(p);

  if (Universe::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(oop* p)       { ParKeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(narrowOop* p) { ParKeepAliveClosure::do_oop_work(p); }

// ParNewGeneration::
KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {}

template <class T>
void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _cl->do_oop_nv(p);

  if (Universe::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/KeepAliveClosure::do_oop(oop* p)       { KeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); }

template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if ((HeapWord*)obj < _boundary) {
      assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
      oop new_obj = obj->is_forwarded()
                      ? obj->forwardee()
                      : _g->DefNewGeneration::copy_to_survivor_space(obj);
      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
    }
    if (_gc_barrier) {
      // If p points to a younger generation, mark the card.
      if ((HeapWord*)obj < _gen_boundary) {
        _rs->write_ref_field_gc_par(p, obj);
      }
    }
  }
}

void ScanClosureWithParBarrier::do_oop(oop* p)       { ScanClosureWithParBarrier::do_oop_work(p); }
void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }

class ParNewRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
public:
  ParNewRefProcTaskProxy(ProcessTask& task, ParNewGeneration& gen,
                         Generation& next_gen,
                         HeapWord* young_old_boundary,
                         ParScanThreadStateSet& state_set);

private:
  virtual void work(uint worker_id);
  virtual void set_for_termination(int active_workers) {
    _state_set.terminator()->reset_for_reuse(active_workers);
  }
private:
  ParNewGeneration&      _gen;
  ProcessTask&           _task;
  Generation&            _next_gen;
  HeapWord*              _young_old_boundary;
  ParScanThreadStateSet& _state_set;
};

ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(
    ProcessTask& task, ParNewGeneration& gen,
    Generation& next_gen,
    HeapWord* young_old_boundary,
    ParScanThreadStateSet& state_set)
  : AbstractGangTask("ParNewGeneration parallel reference processing"),
    _gen(gen),
    _task(task),
    _next_gen(next_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
{
}

void ParNewRefProcTaskProxy::work(uint worker_id)
{
  ResourceMark rm;
  HandleMark hm;
  ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id);
  par_scan_state.set_young_old_boundary(_young_old_boundary);
  _task.work(worker_id, par_scan_state.is_alive_closure(),
             par_scan_state.keep_alive_closure(),
             par_scan_state.evacuate_followers_closure());
}

class ParNewRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _task;

public:
  ParNewRefEnqueueTaskProxy(EnqueueTask& task)
    : AbstractGangTask("ParNewGeneration parallel reference enqueue"),
      _task(task)
  { }

  virtual void work(uint worker_id)
  {
    _task.work(worker_id);
  }
};


void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "not a generational heap");
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  _state_set.reset(workers->active_workers(), _generation.promotion_failed());
  ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(),
                                 _generation.reserved().end(), _state_set);
  workers->run_task(&rp_task);
  _state_set.reset(0 /* bad value in debug if not reset */,
                   _generation.promotion_failed());
}

void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  ParNewRefEnqueueTaskProxy enq_task(task);
  workers->run_task(&enq_task);
}

void ParNewRefProcTaskExecutor::set_single_threaded_mode()
{
  _state_set.flush();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->set_par_threads(0);  // 0 ==> non-parallel.
  gch->save_marks();
}

ScanClosureWithParBarrier::
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
  ScanClosure(g, gc_barrier) {}

EvacuateFollowersClosureGeneral::
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
                                OopsInGenClosure* cur,
                                OopsInGenClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void EvacuateFollowersClosureGeneral::do_void() {
  do {
    // Beware: this call will lead to closure applications via virtual
    // calls.
    _gch->oop_since_save_marks_iterate(_level,
                                       _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}


// A Generation that does parallel young-gen collection.

bool ParNewGeneration::_avoid_promotion_undo = false;

void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set, ParNewTracer& gc_tracer) {
  assert(_promo_failure_scan_stack.is_empty(), "post condition");
  _promo_failure_scan_stack.clear(true); // Clear cached segments.

  remove_forwarding_pointers();
  if (PrintGCDetails) {
    gclog_or_tty->print(" (promotion failed)");
  }
  // All the spaces are in play for mark-sweep.
  swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
  from()->set_next_compaction_space(to());
  gch->set_incremental_collection_failed();
  // Inform the next generation that a promotion failure occurred.
  _next_gen->promotion_failure_occurred();

  // Trace promotion failure in the parallel GC threads
  thread_state_set.trace_promotion_failed(gc_tracer);
  // Single threaded code may have reported promotion failure to the global state
  if (_promotion_failed_info.has_failed()) {
    gc_tracer.report_promotion_failed(_promotion_failed_info);
  }
  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
}

void ParNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start();

  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
    "not a CMS generational heap");
  AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need workgang for parallel work");
  int active_workers =
      AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
                                              workers->active_workers(),
                                              Threads::number_of_non_daemon_threads());
  workers->set_active_workers(active_workers);
  assert(gch->n_gens() == 2,
         "Par collection currently only works with single older gen.");
  _next_gen = gch->next_gen(this);
  // Do we have to avoid promotion_undo?
  if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) {
    set_avoid_promotion_undo(true);
  }

  // If the next generation is too full to accommodate worst-case promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  ParNewTracer gc_tracer;
  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
  gch->trace_heap_before_gc(&gc_tracer);

  init_assuming_no_promotion_failure();

  if (UseAdaptiveSizePolicy) {
    set_survivor_overflow(false);
    size_policy->minor_collection_begin();
  }

  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, gc_tracer.gc_id());
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  SpecializationStats::clear();

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->save_marks();
  assert(workers != NULL, "Need parallel worker threads.");
  int n_workers = active_workers;

  // Set the correct parallelism (number of queues) in the reference processor
  ref_processor()->set_active_mt_degree(n_workers);

  // Always set the terminator for the active number of workers
  // because only those workers go through the termination protocol.
  ParallelTaskTerminator _term(n_workers, task_queues());
  ParScanThreadStateSet thread_state_set(workers->active_workers(),
                                         *to(), *this, *_next_gen, *task_queues(),
                                         _overflow_stacks, desired_plab_sz(),
                                         &gc_tracer, _term);

  ParNewGenTask tsk(this, _next_gen, reserved().end(), &thread_state_set);
  gch->set_par_threads(n_workers);
  gch->rem_set()->prepare_for_younger_refs_iterate(true);
  // It turns out that even when we're using 1 thread, doing the work in a
  // separate thread causes wide variance in run times.  We can't help this
  // in the multi-threaded case, but we special-case n=1 here to get
  // repeatable measurements of the 1-thread overhead of the parallel code.
  if (n_workers > 1) {
    GenCollectedHeap::StrongRootsScope srs(gch);
    workers->run_task(&tsk);
  } else {
    GenCollectedHeap::StrongRootsScope srs(gch);
    tsk.work(0);
  }
  thread_state_set.reset(0 /* Bad value in debug if not reset */,
                         promotion_failed());

  // Process (weak) reference objects found during scavenge.
  ReferenceProcessor* rp = ref_processor();
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);
  KeepAliveClosure keep_alive(&scan_weak_ref);
  ScanClosure               scan_without_gc_barrier(this, false);
  ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
  set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
  EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
    &scan_without_gc_barrier, &scan_with_gc_barrier);
  rp->setup_policy(clear_all_soft_refs);
  // Can the mt_degree be set later (at run_task() time would be best)?
  rp->set_active_mt_degree(active_workers);
  ReferenceProcessorStats stats;
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, &task_executor,
                                              _gc_timer, gc_tracer.gc_id());
  } else {
    thread_state_set.flush();
    gch->set_par_threads(0);  // 0 ==> non-parallel.
    gch->save_marks();
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, NULL,
                                              _gc_timer, gc_tracer.gc_id());
  }
  gc_tracer.report_gc_reference_stats(stats);
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a minor collection fails to collect
      // sufficient space resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    size_policy->reset_gc_overhead_limit_count();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();
  } else {
    handle_promotion_failed(gch, thread_state_set, gc_tracer);
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());

  if (ResizePLAB) {
    plab_stats()->adjust_desired_plab_sz(n_workers);
  }

  if (PrintGC && !PrintGCDetails) {
    gch->print_heap_change(gch_prev_used);
  }

  TASKQUEUE_STATS_ONLY(if (PrintTerminationStats) thread_state_set.print_termination_stats());
  TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) thread_state_set.print_taskqueue_stats());

  if (UseAdaptiveSizePolicy) {
    size_policy->minor_collection_end(gch->gc_cause());
    size_policy->avg_survived()->sample(from()->used());
  }

  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings and os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  SpecializationStats::print();

  rp->set_enqueuing_is_done(true);
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    rp->enqueue_discovered_references(&task_executor);
  } else {
    rp->enqueue_discovered_references(NULL);
  }
  rp->verify_no_references_recorded();

  gch->trace_heap_after_gc(&gc_tracer);
  gc_tracer.report_tenuring_threshold(tenuring_threshold());

  _gc_timer->register_gc_end();

  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}

static int sum;
void ParNewGeneration::waste_some_time() {
  for (int i = 0; i < 100; i++) {
    sum += i;
  }
}

static const oop ClaimedForwardPtr = cast_to_oop<intptr_t>(0x4);

// Because of concurrency, there are times where an object for which
// "is_forwarded()" is true contains an "interim" forwarding pointer
// value.  Such a value will soon be overwritten with a real value.
// This method requires "obj" to have a forwarding pointer, and waits, if
// necessary for a real one to be inserted, and returns it.

oop ParNewGeneration::real_forwardee(oop obj) {
  oop forward_ptr = obj->forwardee();
  if (forward_ptr != ClaimedForwardPtr) {
    return forward_ptr;
  } else {
    return real_forwardee_slow(obj);
  }
}

oop ParNewGeneration::real_forwardee_slow(oop obj) {
  // Spin-read if it is claimed but not yet written by another thread.
  oop forward_ptr = obj->forwardee();
  while (forward_ptr == ClaimedForwardPtr) {
    waste_some_time();
    assert(obj->is_forwarded(), "precondition");
    forward_ptr = obj->forwardee();
  }
  return forward_ptr;
}

#ifdef ASSERT
bool ParNewGeneration::is_legal_forward_ptr(oop p) {
  return
    (_avoid_promotion_undo && p == ClaimedForwardPtr)
    || Universe::heap()->is_in_reserved(p);
}
#endif

void ParNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    // We should really have separate per-worker stacks, rather
    // than use locking of a common pair of stacks.
    MutexLocker ml(ParGCRareEvent_lock);
    preserve_mark(obj, m);
  }
}

// Multiple GC threads may try to promote an object.  If the object
// is successfully promoted, a forwarding pointer will be installed in
// the object in the young generation.  This method claims the right
// to install the forwarding pointer before it copies the object,
// thus avoiding the need to undo the copy as in
// copy_to_survivor_space_avoiding_with_undo.

oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo(
        ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
  // In the sequential version, this assert also says that the object is
  // not forwarded.  That might not be the case here.  It is the case that
  // the caller observed it to be not forwarded at some time in the past.
  assert(is_in_reserved(old), "shouldn't be scavenging this oop");

  // The sequential code read "old->age()" below.  That doesn't work here,
  // since the age is in the mark word, and that might be overwritten with
  // a forwarding pointer by a parallel thread.  So we must save the mark
  // word in a local and then analyze it.
  oopDesc dummyOld;
  dummyOld.set_mark(m);
  assert(!dummyOld.is_forwarded(),
         "should not be called with forwarding pointer mark word.");

  oop new_obj = NULL;
  oop forward_ptr;

  // Try allocating obj in to-space (unless too old)
  if (dummyOld.age() < tenuring_threshold()) {
    new_obj = (oop)par_scan_state->alloc_in_to_space(sz, old, dummyOld.age());
    if (new_obj == NULL) {
      set_survivor_overflow(true);
    }
  }

  if (new_obj == NULL) {
    // Either to-space is full or we decided to promote
    // try allocating obj tenured

    // Attempt to install a null forwarding pointer (atomically),
    // to claim the right to install the real forwarding pointer.
    forward_ptr = old->forward_to_atomic(ClaimedForwardPtr);
    if (forward_ptr != NULL) {
      // someone else beat us to it.
      return real_forwardee(old);
    }

    new_obj = _next_gen->par_promote(par_scan_state->gc_tracer(),
                                     par_scan_state->thread_num(),
                                     old, m, sz);

    if (new_obj == NULL) {
      // promotion failed, forward to self
      _promotion_failed = true;
      new_obj = old;

      preserve_mark_if_necessary(old, m);
      par_scan_state->register_promotion_failure(sz);
    }

    old->forward_to(new_obj);
    forward_ptr = NULL;
  } else {
    // Is in to-space; do copying ourselves.
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
    forward_ptr = old->forward_to_atomic(new_obj);
    // Restore the mark word copied above.
    new_obj->set_mark(m);
    // Increment age if obj still in new generation
    new_obj->incr_age();
    par_scan_state->age_table()->add(new_obj, sz);
  }
  assert(new_obj != NULL, "just checking");

#ifndef PRODUCT
  // This code must come after the CAS test, or it will print incorrect
  // information.
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
       is_in_reserved(new_obj) ? "copying" : "tenuring",
       new_obj->klass()->internal_name(), (void *)old, (void *)new_obj, new_obj->size());
  }
#endif

  if (forward_ptr == NULL) {
    oop obj_to_push = new_obj;
    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
      // Length field used as index of next element to be scanned.
      // Real length can be obtained from real_forwardee()
      arrayOop(old)->set_length(0);
      obj_to_push = old;
      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
             "push forwarded object");
    }
    // Push it on one of the queues of to-be-scanned objects.
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
      // Add stats for overflow pushes.
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("queue overflow!\n");
      }
      push_on_overflow_list(old, par_scan_state);
      TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
    }

    return new_obj;
  }

  // Oops.  Someone beat us to it.  Undo the allocation.  Where did we
  // allocate it?
  if (is_in_reserved(new_obj)) {
    // Must be in to_space.
    assert(to()->is_in_reserved(new_obj), "Checking");
    if (forward_ptr == ClaimedForwardPtr) {
      // Wait to get the real forwarding pointer value.
      forward_ptr = real_forwardee(old);
    }
    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
  }

  return forward_ptr;
}


// Multiple GC threads may try to promote the same object.  If two
// or more GC threads copy the object, only one wins the race to install
// the forwarding pointer.  The other threads have to undo their copy.

oop ParNewGeneration::copy_to_survivor_space_with_undo(
        ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {

  // In the sequential version, this assert also says that the object is
  // not forwarded.  That might not be the case here.  It is the case that
  // the caller observed it to be not forwarded at some time in the past.
  assert(is_in_reserved(old), "shouldn't be scavenging this oop");

  // The sequential code read "old->age()" below.  That doesn't work here,
  // since the age is in the mark word, and that might be overwritten with
  // a forwarding pointer by a parallel thread.  So we must save the mark
  // word here, install it in a local oopDesc, and then analyze it.
  oopDesc dummyOld;
  dummyOld.set_mark(m);
  assert(!dummyOld.is_forwarded(),
         "should not be called with forwarding pointer mark word.");

  bool failed_to_promote = false;
  oop new_obj = NULL;
  oop forward_ptr;

  // Try allocating obj in to-space (unless too old)
  if (dummyOld.age() < tenuring_threshold()) {
    new_obj = (oop)par_scan_state->alloc_in_to_space(sz, old, dummyOld.age());
    if (new_obj == NULL) {
      set_survivor_overflow(true);
    }
  }

  if (new_obj == NULL) {
    // Either to-space is full or we decided to promote
    // try allocating obj tenured
    new_obj = _next_gen->par_promote(par_scan_state->gc_tracer(),
                                     par_scan_state->thread_num(),
                                     old, m, sz);

    if (new_obj == NULL) {
      // promotion failed, forward to self
      forward_ptr = old->forward_to_atomic(old);
      new_obj = old;

      if (forward_ptr != NULL) {
        return forward_ptr;   // someone else succeeded
      }

      _promotion_failed = true;
      failed_to_promote = true;

      preserve_mark_if_necessary(old, m);
      par_scan_state->register_promotion_failure(sz);
    }
  } else {
    // Is in to-space; do copying ourselves.
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
    // Restore the mark word copied above.
    new_obj->set_mark(m);
    // Increment age if new_obj still in new generation
    new_obj->incr_age();
    par_scan_state->age_table()->add(new_obj, sz);
  }
  assert(new_obj != NULL, "just checking");

#ifndef PRODUCT
  // This code must come after the CAS test, or it will print incorrect
  // information.
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
       is_in_reserved(new_obj) ? "copying" : "tenuring",
       new_obj->klass()->internal_name(), (void *)old, (void *)new_obj, new_obj->size());
  }
#endif

  // Now attempt to install the forwarding pointer (atomically).
  // We have to copy the mark word before overwriting with forwarding
  // ptr, so we can restore it below in the copy.
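  // In the promotion-failure case above, old was already self-forwarded
  // via forward_to_atomic(old), so the CAS must not be retried here;
  // forward_ptr then still holds the NULL result of that earlier attempt.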
  if (!failed_to_promote) {
    forward_ptr = old->forward_to_atomic(new_obj);
  }

  if (forward_ptr == NULL) {
    oop obj_to_push = new_obj;
    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
      // Length field used as index of next element to be scanned.
      // Real length can be obtained from real_forwardee()
      arrayOop(old)->set_length(0);
      obj_to_push = old;
      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
             "push forwarded object");
    }
    // Push it on one of the queues of to-be-scanned objects.
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
      // Add stats for overflow pushes.
      push_on_overflow_list(old, par_scan_state);
      TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
    }

    return new_obj;
  }

  // Oops.  Someone beat us to it.  Undo the allocation.  Where did we
  // allocate it?
  if (is_in_reserved(new_obj)) {
    // Must be in to_space.
    assert(to()->is_in_reserved(new_obj), "Checking");
    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
  } else {
    assert(!_avoid_promotion_undo, "Should not be here if avoiding.");
    _next_gen->par_promote_alloc_undo(par_scan_state->thread_num(),
                                      (HeapWord*)new_obj, sz);
  }

  return forward_ptr;
}

#ifndef PRODUCT
// It's OK to call this multi-threaded;  the worst thing
// that can happen is that we'll get a bunch of closely
// spaced simulated overflows, but that's OK, in fact
// probably good as it would exercise the overflow code
// under contention.
bool ParNewGeneration::should_simulate_overflow() {
  if (_overflow_counter-- <= 0) { // just being defensive
    _overflow_counter = ParGCWorkQueueOverflowInterval;
    return true;
  } else {
    return false;
  }
}
#endif

// In case we are using compressed oops, we need to be careful.
// If the object being pushed is an object array, then its length
// field keeps track of the "grey boundary" at which the next
// incremental scan will be done (see ParGCArrayScanChunk).
// When using compressed oops, this length field is kept in the
// lower 32 bits of the erstwhile klass word and cannot be used
// for the overflow chaining pointer (OCP below).  As such the OCP
// would itself need to be compressed into the top 32-bits in this
// case.  Unfortunately, see below, in the event that we have a
// promotion failure, the node to be pushed on the list can be
// outside of the Java heap, so the heap-based pointer compression
// would not work (we would have potential aliasing between C-heap
// and Java-heap pointers).  For this reason, when using compressed
// oops, we simply use a worker-thread-local, non-shared overflow
// list in the form of a growable array, with a slightly different
// overflow stack draining strategy.  If/when we start using fat
// stacks here, we can go back to using (fat) pointer chains
// (although some performance comparisons would be useful since
// single global lists have their own performance disadvantages
// as we were made painfully aware not long ago, see 6786503).
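// BUSY is a sentinel distinguishable from NULL and from any real oop: it
// marks the global overflow list as claimed while one thread detaches a
// prefix, letting concurrent pushers and takers tell "empty" apart from
// "temporarily taken"; see take_from_overflow_list_work() below.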
#define BUSY (cast_to_oop<intptr_t>(0x1aff1aff))
void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) {
  assert(is_in_reserved(from_space_obj), "Should be from this generation");
  if (ParGCUseLocalOverflow) {
    // In the case of compressed oops, we use a private, not-shared
    // overflow stack.
    par_scan_state->push_on_overflow_stack(from_space_obj);
  } else {
    assert(!UseCompressedOops, "Error");
    // if the object has been forwarded to itself, then we cannot
    // use the klass pointer for the linked list.  Instead we have
    // to allocate an oopDesc in the C-Heap and use that for the linked list.
    // XXX This is horribly inefficient when a promotion failure occurs
    // and should be fixed. XXX FIX ME !!!
#ifndef PRODUCT
    Atomic::inc_ptr(&_num_par_pushes);
    assert(_num_par_pushes > 0, "Tautology");
#endif
    if (from_space_obj->forwardee() == from_space_obj) {
      oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1, mtGC);
      listhead->forward_to(from_space_obj);
      from_space_obj = listhead;
    }
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list;
    do {
      cur_overflow_list = observed_overflow_list;
      if (cur_overflow_list != BUSY) {
        from_space_obj->set_klass_to_list_ptr(cur_overflow_list);
      } else {
        from_space_obj->set_klass_to_list_ptr(NULL);
      }
      observed_overflow_list =
        (oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list);
    } while (cur_overflow_list != observed_overflow_list);
  }
}

bool ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) {
  bool res;

  if (ParGCUseLocalOverflow) {
    res = par_scan_state->take_from_overflow_stack();
  } else {
    assert(!UseCompressedOops, "Error");
    res = take_from_overflow_list_work(par_scan_state);
  }
  return res;
}


// *NOTE*: The overflow list manipulation code here and
// in CMSCollector:: are very similar in shape,
// except that in the CMS case we thread the objects
// directly into the list via their mark word, and do
// not need to deal with special cases below related
// to chunking of object arrays and promotion failure
// handling.
// CR 6797058 has been filed to attempt consolidation of
// the common code.
// Because of the common code, if you make any changes in
// the code below, please check the CMS version to see if
// similar changes might be needed.
// See CMSCollector::par_take_from_overflow_list() for
// more extensive documentation comments.
bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan_state) {
  ObjToScanQueue* work_q = par_scan_state->work_queue();
  // How many to take?
  size_t objsFromOverflow = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
                                 (size_t)ParGCDesiredObjsFromOverflowList);

  assert(!UseCompressedOops, "Error");
  assert(par_scan_state->overflow_stack() == NULL, "Error");
  if (_overflow_list == NULL) return false;

  // Otherwise, there was something there; try claiming the list.
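  // Atomically swap BUSY into the list head: a real oop in return means we
  // now own the whole chain; getting BUSY back means another thread holds
  // the list, so we back off briefly below and retry a bounded number of
  // times.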
  oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
  // Trim off a prefix of at most objsFromOverflow items
  Thread* tid = Thread::current();
  size_t spin_count = (size_t)ParallelGCThreads;
  size_t sleep_time_millis = MAX2((size_t)1, objsFromOverflow/100);
  for (size_t spin = 0; prefix == BUSY && spin < spin_count; spin++) {
    // someone grabbed it before we did ...
    // ... we spin for a short while...
    os::sleep(tid, sleep_time_millis, false);
    if (_overflow_list == NULL) {
      // nothing left to take
      return false;
    } else if (_overflow_list != BUSY) {
      // try and grab the prefix
      prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
    }
  }
  if (prefix == NULL || prefix == BUSY) {
    // Nothing to take or waited long enough
    if (prefix == NULL) {
      // Write back the NULL in case we overwrote it with BUSY above
      // and it is still the same value.
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
    return false;
  }
  assert(prefix != NULL && prefix != BUSY, "Error");
  size_t i = 1;
  oop cur = prefix;
  while (i < objsFromOverflow && cur->klass_or_null() != NULL) {
    i++; cur = cur->list_ptr_from_klass();
  }

  // Reattach remaining (suffix) to overflow list
  if (cur->klass_or_null() == NULL) {
    // Write back the NULL in lieu of the BUSY we wrote
    // above, if it is still the same value.
    if (_overflow_list == BUSY) {
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
  } else {
    assert(cur->klass_or_null() != (Klass*)(address)BUSY, "Error");
    oop suffix = cur->list_ptr_from_klass();  // suffix will be put back on global list
    cur->set_klass_to_list_ptr(NULL);         // break off suffix
    // It's possible that the list is still in the empty(busy) state
    // we left it in a short while ago; in that case we may be
    // able to place back the suffix.
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list = observed_overflow_list;
    bool attached = false;
    while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
      observed_overflow_list =
        (oop) Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
      if (cur_overflow_list == observed_overflow_list) {
        attached = true;
        break;
      } else cur_overflow_list = observed_overflow_list;
    }
    if (!attached) {
      // Too bad, someone else got in in between; we'll need to do a splice.
      // Find the last item of suffix list
      oop last = suffix;
      while (last->klass_or_null() != NULL) {
        last = last->list_ptr_from_klass();
      }
      // Atomically prepend suffix to current overflow list
      observed_overflow_list = _overflow_list;
      do {
        cur_overflow_list = observed_overflow_list;
        if (cur_overflow_list != BUSY) {
          // Do the splice ...
          last->set_klass_to_list_ptr(cur_overflow_list);
        } else {  // cur_overflow_list == BUSY
          last->set_klass_to_list_ptr(NULL);
        }
        observed_overflow_list =
          (oop)Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
      } while (cur_overflow_list != observed_overflow_list);
    }
  }

  // Push objects on prefix list onto this thread's work queue
  assert(prefix != NULL && prefix != BUSY, "program logic");
  cur = prefix;
  ssize_t n = 0;
  while (cur != NULL) {
    oop obj_to_push = cur->forwardee();
    oop next        = cur->list_ptr_from_klass();
    cur->set_klass(obj_to_push->klass());
    // This may be an array object that is self-forwarded. In that case, the list pointer
    // space, cur, is not in the Java heap, but rather in the C-heap and should be freed.
    if (!is_in_reserved(cur)) {
      // This can become a scaling bottleneck when there is work queue overflow coincident
      // with promotion failure.
      oopDesc* f = cur;
      FREE_C_HEAP_ARRAY(oopDesc, f, mtGC);
    } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = work_q->push(obj_to_push);
    assert(ok, "Should have succeeded");
    cur = next;
    n++;
  }
  TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n));
#ifndef PRODUCT
  assert(_num_par_pushes >= n, "Too many pops?");
  Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
#endif
  return true;
}
#undef BUSY

void ParNewGeneration::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor =
      new ReferenceProcessor(_reserved,                  // span
                             ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
                             (int) ParallelGCThreads,    // mt processing degree
                             refs_discovery_is_mt(),     // mt discovery
                             (int) ParallelGCThreads,    // mt discovery degree
                             refs_discovery_is_atomic(), // atomic_discovery
                             NULL);                      // is_alive_non_header
  }
}

const char* ParNewGeneration::name() const {
  return "par new generation";
}