/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
#include "gc_implementation/parNew/parNewGeneration.hpp"
#include "gc_implementation/parNew/parOopClosures.inline.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "gc_implementation/shared/ageTable.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/defNewGeneration.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.hpp"
#include "memory/generation.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/thread.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/workgroup.hpp"

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParScanThreadState::ParScanThreadState(Space* to_space_,
                                       ParNewGeneration* gen_,
                                       Generation* old_gen_,
                                       int thread_num_,
                                       ObjToScanQueueSet* work_queue_set_,
                                       Stack<oop, mtGC>* overflow_stacks_,
                                       size_t desired_plab_sz_,
                                       ParallelTaskTerminator& term_) :
  _to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_),
  _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
  _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
  _ageTable(false), // false ==> not the global age table, no perf data.
  _to_space_alloc_buffer(desired_plab_sz_),
  _to_space_closure(gen_, this), _old_gen_closure(gen_, this),
  _to_space_root_closure(gen_, this), _old_gen_root_closure(gen_, this),
  _older_gen_closure(gen_, this),
  _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
                      &_to_space_root_closure, gen_, &_old_gen_root_closure,
                      work_queue_set_, &term_),
  _is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this),
  _keep_alive_closure(&_scan_weak_ref_closure),
  _promotion_failure_size(0),
  _strong_roots_time(0.0), _term_time(0.0)
{
#if TASKQUEUE_STATS
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
#endif // TASKQUEUE_STATS

  _survivor_chunk_array =
    (ChunkArray*) old_gen()->get_data_recorder(thread_num());
  _hash_seed = 17;  // Might want to take time-based random value.
  _start = os::elapsedTime();
  _old_gen_closure.set_generation(old_gen_);
  _old_gen_root_closure.set_generation(old_gen_);
}
#ifdef _MSC_VER
#pragma warning( pop )
#endif

void ParScanThreadState::record_survivor_plab(HeapWord* plab_start,
                                              size_t plab_word_size) {
  ChunkArray* sca = survivor_chunk_array();
  if (sca != NULL) {
    // A non-null SCA implies that we want the PLAB data recorded.
    sca->record_sample(plab_start, plab_word_size);
  }
}

bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) const {
  return new_obj->is_objArray() &&
         arrayOop(new_obj)->length() > ParGCArrayScanChunk &&
         new_obj != old_obj;
}

void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
  assert(old->is_objArray(), "must be obj array");
  assert(old->is_forwarded(), "must be forwarded");
  assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
  assert(!old_gen()->is_in(old), "must be in young generation.");

  objArrayOop obj = objArrayOop(old->forwardee());
  // Process ParGCArrayScanChunk elements now
  // and push the remainder back onto queue
  int start = arrayOop(old)->length();
  int end = obj->length();
  int remainder = end - start;
  assert(start <= end, "just checking");
  if (remainder > 2 * ParGCArrayScanChunk) {
    // Test above combines last partial chunk with a full chunk
    end = start + ParGCArrayScanChunk;
    arrayOop(old)->set_length(end);
    // Push remainder.
    bool ok = work_queue()->push(old);
    assert(ok, "just popped, push must be okay");
  } else {
    // Restore length so that it can be used if there
    // is a promotion failure and forwarding pointers
    // must be removed.
    arrayOop(old)->set_length(end);
  }

  // process our set of indices (include header in first chunk)
  // should make sure end is even (aligned to HeapWord in case of compressed oops)
  if ((HeapWord *)obj < young_old_boundary()) {
    // object is in to_space
    obj->oop_iterate_range(&_to_space_closure, start, end);
  } else {
    // object is in old generation
    obj->oop_iterate_range(&_old_gen_closure, start, end);
  }
}
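
// Worked example of the chunking above (illustrative only, assuming the
// default ParGCArrayScanChunk value of 50): the from-space copy's length
// field is (ab)used as the index of the next element to scan.  Suppose the
// real array length is 1000 and old->length() is currently 0.  Then
// remainder == 1000 > 2 * 50, so this pass scans elements [0, 50), sets
// old->length() to 50, and re-pushes "old".  Successive passes scan
// [50, 100), [100, 150), ... until fewer than 2 * ParGCArrayScanChunk
// elements remain, at which point the final pass scans the rest and
// restores the true length.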

void ParScanThreadState::trim_queues(int max_size) {
  ObjToScanQueue* queue = work_queue();
  do {
    while (queue->size() > (juint)max_size) {
      oop obj_to_scan;
      if (queue->pop_local(obj_to_scan)) {
        if ((HeapWord *)obj_to_scan < young_old_boundary()) {
          if (obj_to_scan->is_objArray() &&
              obj_to_scan->is_forwarded() &&
              obj_to_scan->forwardee() != obj_to_scan) {
            scan_partial_array_and_push_remainder(obj_to_scan);
          } else {
            // object is in to_space
            obj_to_scan->oop_iterate(&_to_space_closure);
          }
        } else {
          // object is in old generation
          obj_to_scan->oop_iterate(&_old_gen_closure);
        }
      }
    }
    // For the case of compressed oops, we have a private, non-shared
    // overflow stack, so we eagerly drain it so as to more evenly
    // distribute load early.  Note: this may be good to do in
    // general rather than delay for the final stealing phase.
    // If applicable, we'll transfer a set of objects over to our
    // work queue, allowing them to be stolen and draining our
    // private overflow stack.
  } while (ParGCTrimOverflow && young_gen()->take_from_overflow_list(this));
}

bool ParScanThreadState::take_from_overflow_stack() {
  assert(ParGCUseLocalOverflow, "Else should not call");
  assert(young_gen()->overflow_list() == NULL, "Error");
  ObjToScanQueue* queue = work_queue();
  Stack<oop, mtGC>* const of_stack = overflow_stack();
  const size_t num_overflow_elems = of_stack->size();
  const size_t space_available = queue->max_elems() - queue->size();
  const size_t num_take_elems = MIN3(space_available / 4,
                                     ParGCDesiredObjsFromOverflowList,
                                     num_overflow_elems);
  // Transfer the most recent num_take_elems from the overflow
  // stack to our work queue.
  for (size_t i = 0; i != num_take_elems; i++) {
    oop cur = of_stack->pop();
    oop obj_to_push = cur->forwardee();
    assert(Universe::heap()->is_in_reserved(cur), "Should be in heap");
    assert(!old_gen()->is_in_reserved(cur), "Should be in young gen");
    assert(Universe::heap()->is_in_reserved(obj_to_push), "Should be in heap");
    if (should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = queue->push(obj_to_push);
    assert(ok, "Should have succeeded");
  }
  assert(young_gen()->overflow_list() == NULL, "Error");
  return num_take_elems > 0;  // was something transferred?
}
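
// Sizing note on num_take_elems above (illustrative numbers only, assuming
// the default ParGCDesiredObjsFromOverflowList value of 20): with room for
// 200 more entries in the work queue and 30 objects on the private
// overflow stack, num_take_elems == MIN3(200 / 4, 20, 30) == 20.  Capping
// the transfer at a quarter of the free queue space keeps a refill from
// immediately re-overflowing the queue; the other two terms bound it by
// what is wanted and by what is actually available.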

void ParScanThreadState::push_on_overflow_stack(oop p) {
  assert(ParGCUseLocalOverflow, "Else should not call");
  overflow_stack()->push(p);
  assert(young_gen()->overflow_list() == NULL, "Error");
}

HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {

  // Otherwise, if the object is small enough, try to reallocate the
  // buffer.
  HeapWord* obj = NULL;
  if (!_to_space_full) {
    ParGCAllocBuffer* const plab = to_space_alloc_buffer();
    Space*            const sp   = to_space();
    if (word_sz * 100 <
        ParallelGCBufferWastePct * plab->word_sz()) {
      // Is small enough; abandon this buffer and start a new one.
      plab->retire(false, false);
      size_t buf_size = plab->word_sz();
      HeapWord* buf_space = sp->par_allocate(buf_size);
      if (buf_space == NULL) {
        const size_t min_bytes =
          ParGCAllocBuffer::min_size() << LogHeapWordSize;
        size_t free_bytes = sp->free();
        while(buf_space == NULL && free_bytes >= min_bytes) {
          buf_size = free_bytes >> LogHeapWordSize;
          assert(buf_size == (size_t)align_object_size(buf_size),
                 "Invariant");
          buf_space  = sp->par_allocate(buf_size);
          free_bytes = sp->free();
        }
      }
      if (buf_space != NULL) {
        plab->set_word_size(buf_size);
        plab->set_buf(buf_space);
        record_survivor_plab(buf_space, buf_size);
        obj = plab->allocate(word_sz);
        // Note that we cannot compare buf_size < word_sz below
        // because of AlignmentReserve (see ParGCAllocBuffer::allocate()).
        assert(obj != NULL || plab->words_remaining() < word_sz,
               "Else should have been able to allocate");
        // It's conceivable that we may be able to use the
        // buffer we just grabbed for subsequent small requests
        // even if not for this one.
      } else {
        // We're used up.
        _to_space_full = true;
      }

    } else {
      // Too large; allocate the object individually.
      obj = sp->par_allocate(word_sz);
    }
  }
  return obj;
}
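
// A note on the waste test above (illustrative numbers only, assuming the
// default ParallelGCBufferWastePct value of 10): the test is
// "word_sz < (ParallelGCBufferWastePct / 100) * plab->word_sz()"
// rearranged into pure integer arithmetic.  With a 4096-word PLAB, a
// request of up to 409 words that missed the fast path retires the buffer
// and starts a fresh one; since the request did not fit, the space
// abandoned in the old buffer is smaller than the request, hence under
// ~10% of the PLAB.  Larger requests are allocated directly in to-space,
// so one big object cannot force us to discard a buffer that still has
// plenty of room.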

void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj,
                                                size_t word_sz) {
  // Is the alloc in the current alloc buffer?
  if (to_space_alloc_buffer()->contains(obj)) {
    assert(to_space_alloc_buffer()->contains(obj + word_sz - 1),
           "Should contain whole object.");
    to_space_alloc_buffer()->undo_allocation(obj, word_sz);
  } else {
    CollectedHeap::fill_with_object(obj, word_sz);
  }
}

void ParScanThreadState::print_and_clear_promotion_failure_size() {
  if (_promotion_failure_size != 0) {
    if (PrintPromotionFailure) {
      gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ",
        _thread_num, _promotion_failure_size);
    }
    _promotion_failure_size = 0;
  }
}

class ParScanThreadStateSet: private ResourceArray {
public:
  // Initializes states for the specified number of threads;
  ParScanThreadStateSet(int                     num_threads,
                        Space&                  to_space,
                        ParNewGeneration&       gen,
                        Generation&             old_gen,
                        ObjToScanQueueSet&      queue_set,
                        Stack<oop, mtGC>*       overflow_stacks_,
                        size_t                  desired_plab_sz,
                        ParallelTaskTerminator& term);

  ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }

  inline ParScanThreadState& thread_state(int i);

  void reset(int active_workers, bool promotion_failed);
  void flush();

  #if TASKQUEUE_STATS
  static void
    print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_termination_stats(outputStream* const st = gclog_or_tty);
  static void
    print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_taskqueue_stats(outputStream* const st = gclog_or_tty);
  void reset_stats();
  #endif // TASKQUEUE_STATS

private:
  ParallelTaskTerminator& _term;
  ParNewGeneration&       _gen;
  Generation&             _next_gen;
public:
  bool is_valid(int id) const { return id < length(); }
  ParallelTaskTerminator* terminator() { return &_term; }
};


ParScanThreadStateSet::ParScanThreadStateSet(
  int num_threads, Space& to_space, ParNewGeneration& gen,
  Generation& old_gen, ObjToScanQueueSet& queue_set,
  Stack<oop, mtGC>* overflow_stacks,
  size_t desired_plab_sz, ParallelTaskTerminator& term)
  : ResourceArray(sizeof(ParScanThreadState), num_threads),
    _gen(gen), _next_gen(old_gen), _term(term)
{
  assert(num_threads > 0, "sanity check!");
  assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
         "overflow_stack allocation mismatch");
  // Initialize states.
  for (int i = 0; i < num_threads; ++i) {
    new ((ParScanThreadState*)_data + i)
        ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
                           overflow_stacks, desired_plab_sz, term);
  }
}

inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
{
  assert(i >= 0 && i < length(), "sanity check!");
  return ((ParScanThreadState*)_data)[i];
}


void ParScanThreadStateSet::reset(int active_threads, bool promotion_failed)
{
  _term.reset_for_reuse(active_threads);
  if (promotion_failed) {
    for (int i = 0; i < length(); ++i) {
      thread_state(i).print_and_clear_promotion_failure_size();
    }
  }
}

#if TASKQUEUE_STATS
void
ParScanThreadState::reset_stats()
{
  taskqueue_stats().reset();
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
}

void ParScanThreadStateSet::reset_stats()
{
  for (int i = 0; i < length(); ++i) {
    thread_state(i).reset_stats();
  }
}

void
ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st)
{
  st->print_raw_cr("GC Termination Stats");
  st->print_raw_cr("     elapsed  --strong roots-- "
                   "-------termination-------");
  st->print_raw_cr("thr     ms        ms       %   "
                   "    ms       %   attempts");
  st->print_raw_cr("--- --------- --------- ------ "
                   "--------- ------ --------");
}

void ParScanThreadStateSet::print_termination_stats(outputStream* const st)
{
  print_termination_stats_hdr(st);

  for (int i = 0; i < length(); ++i) {
    const ParScanThreadState & pss = thread_state(i);
    const double elapsed_ms = pss.elapsed_time() * 1000.0;
    const double s_roots_ms = pss.strong_roots_time() * 1000.0;
    const double term_ms = pss.term_time() * 1000.0;
    st->print_cr("%3d %9.2f %9.2f %6.2f "
                 "%9.2f %6.2f " SIZE_FORMAT_W(8),
                 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
                 term_ms, term_ms * 100 / elapsed_ms, pss.term_attempts());
  }
}

// Print stats related to work queue activity.
void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st)
{
  st->print_raw_cr("GC Task Stats");
  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
}

void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st)
{
  print_taskqueue_stats_hdr(st);

  TaskQueueStats totals;
  for (int i = 0; i < length(); ++i) {
    const ParScanThreadState & pss = thread_state(i);
    const TaskQueueStats & stats = pss.taskqueue_stats();
    st->print("%3d ", i); stats.print(st); st->cr();
    totals += stats;

    if (pss.overflow_refills() > 0) {
      st->print_cr("    " SIZE_FORMAT_W(10) " overflow refills    "
                   SIZE_FORMAT_W(10) " overflow objects",
                   pss.overflow_refills(), pss.overflow_refill_objs());
    }
  }
  st->print("tot "); totals.print(st); st->cr();

  DEBUG_ONLY(totals.verify());
}
#endif // TASKQUEUE_STATS

void ParScanThreadStateSet::flush()
{
  // Work in this loop should be kept as lightweight as
  // possible since this might otherwise become a bottleneck
  // to scaling.  Should we add heavy-weight work into this
  // loop, consider parallelizing the loop into the worker threads.
  for (int i = 0; i < length(); ++i) {
    ParScanThreadState& par_scan_state = thread_state(i);

    // Flush stats related to To-space PLAB activity and
    // retire the last buffer.
    par_scan_state.to_space_alloc_buffer()->
      flush_stats_and_retire(_gen.plab_stats(),
                             true /* end_of_gc */,
                             false /* retain */);

    // Every thread has its own age table.  We need to merge
    // them all into one.
    ageTable *local_table = par_scan_state.age_table();
    _gen.age_table()->merge(local_table);

    // Inform old gen that we're done.
    _next_gen.par_promote_alloc_done(i);
    _next_gen.par_oop_since_save_marks_iterate_done(i);
  }

  if (UseConcMarkSweepGC && ParallelGCThreads > 0) {
    // We need to call this even when ResizeOldPLAB is disabled
    // so as to avoid breaking some asserts.  While we may be able
    // to avoid this by reorganizing the code a bit, I am loath
    // to do that unless we find cases where ergo leads to bad
    // performance.
    CFLS_LAB::compute_desired_plab_size();
  }
}

ParScanClosure::ParScanClosure(ParNewGeneration* g,
                               ParScanThreadState* par_scan_state) :
  OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ParScanWithBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, false); }
void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }

void ParScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, false); }
void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }

void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, true); }
void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }

void ParRootScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, true); }
void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }

ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
                                             ParScanThreadState* par_scan_state)
  : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
{}

void ParScanWeakRefClosure::do_oop(oop* p)       { ParScanWeakRefClosure::do_oop_work(p); }
void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }

#ifdef WIN32
#pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */
#endif

ParEvacuateFollowersClosure::ParEvacuateFollowersClosure(
    ParScanThreadState* par_scan_state_,
    ParScanWithoutBarrierClosure* to_space_closure_,
    ParScanWithBarrierClosure* old_gen_closure_,
    ParRootScanWithoutBarrierClosure* to_space_root_closure_,
    ParNewGeneration* par_gen_,
    ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_,
    ObjToScanQueueSet* task_queues_,
    ParallelTaskTerminator* terminator_) :

    _par_scan_state(par_scan_state_),
    _to_space_closure(to_space_closure_),
    _old_gen_closure(old_gen_closure_),
    _to_space_root_closure(to_space_root_closure_),
    _old_gen_root_closure(old_gen_root_closure_),
    _par_gen(par_gen_),
    _task_queues(task_queues_),
    _terminator(terminator_)
{}

void ParEvacuateFollowersClosure::do_void() {
  ObjToScanQueue* work_q = par_scan_state()->work_queue();

  while (true) {

    // Scan to-space and old-gen objs until we run out of both.
    oop obj_to_scan;
    par_scan_state()->trim_queues(0);

    // We have no local work, attempt to steal from other threads.

    // attempt to steal work from promoted.
    if (task_queues()->steal(par_scan_state()->thread_num(),
                             par_scan_state()->hash_seed(),
                             obj_to_scan)) {
      bool res = work_q->push(obj_to_scan);
      assert(res, "Empty queue should have room for a push.");

      // if successful, goto Start.
      continue;

      // try global overflow list.
    } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
      continue;
    }

    // Otherwise, offer termination.
    par_scan_state()->start_term_time();
    if (terminator()->offer_termination()) break;
    par_scan_state()->end_term_time();
  }
  assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
         "Broken overflow list?");
  // Finish the last termination pause.
  par_scan_state()->end_term_time();
}
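
// Informal summary of the loop above: each worker alternates between three
// sources of work -- its own queue (drained by trim_queues(0)), other
// workers' queues (via steal()), and the global overflow list.  Only when
// all three come up empty does it offer termination, and
// offer_termination() returns true only once every active worker has done
// the same, at which point evacuation is complete.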

ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* next_gen,
                HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) :
    AbstractGangTask("ParNewGeneration collection"),
    _gen(gen), _next_gen(next_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
  {}

// Reset the terminator for the given number of
// active threads.
void ParNewGenTask::set_for_termination(int active_workers) {
  _state_set->reset(active_workers, _gen->promotion_failed());
  // Should the heap be passed in?  There's only 1 for now so
  // grab it instead.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->set_n_termination(active_workers);
}

// The "i" passed to this method is the part of the work for
// this thread.  It is not the worker ID.  The "i" is derived
// from _started_workers, which is incremented in internal_note_start()
// called in GangWorker loop().  internal_note_start() is called under
// the protection of the gang monitor and is called after a task is
// started.  So "i" is based on first-come-first-served.

void ParNewGenTask::work(uint worker_id) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  // Since this is being done in a separate thread, need new resource
  // and handle marks.
  ResourceMark rm;
  HandleMark hm;
  // We would need multiple old-gen queues otherwise.
  assert(gch->n_gens() == 2, "Par young collection currently only works with one older gen.");

  Generation* old_gen = gch->next_gen(_gen);

  ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
  assert(_state_set->is_valid(worker_id), "Should not have been called");

  par_scan_state.set_young_old_boundary(_young_old_boundary);

  KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),
                                      gch->rem_set()->klass_rem_set());

  int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;

  par_scan_state.start_strong_roots();
  gch->gen_process_strong_roots(_gen->level(),
                                true,  // Process younger gens, if any,
                                       // as strong roots.
                                false, // no scope; this is parallel code
                                true,  // is scavenging
                                SharedHeap::ScanningOption(so),
                                &par_scan_state.to_space_root_closure(),
                                true,   // walk *all* scavengable nmethods
                                &par_scan_state.older_gen_closure(),
                                &klass_scan_closure);
  par_scan_state.end_strong_roots();

  // "evacuate followers".
  par_scan_state.evacuate_followers_closure().do_void();
}

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParNewGeneration::
ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
  : DefNewGeneration(rs, initial_byte_size, level, "PCopy"),
  _overflow_list(NULL),
  _is_alive_closure(this),
  _plab_stats(YoungPLABSize, PLABWeight)
{
  NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
  NOT_PRODUCT(_num_par_pushes = 0;)
  _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
  guarantee(_task_queues != NULL, "task_queues allocation failure.");

  for (uint i1 = 0; i1 < ParallelGCThreads; i1++) {
    ObjToScanQueue *q = new ObjToScanQueue();
    guarantee(q != NULL, "work_queue Allocation failure.");
    _task_queues->register_queue(i1, q);
  }

  for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
    _task_queues->queue(i2)->initialize();

  _overflow_stacks = NULL;
  if (ParGCUseLocalOverflow) {

    // typedef to workaround NEW_C_HEAP_ARRAY macro, which can not deal
    // with ','
    typedef Stack<oop, mtGC> GCOopStack;

    _overflow_stacks = NEW_C_HEAP_ARRAY(GCOopStack, ParallelGCThreads, mtGC);
    for (size_t i = 0; i < ParallelGCThreads; ++i) {
      new (_overflow_stacks + i) Stack<oop, mtGC>();
    }
  }

  if (UsePerfData) {
    EXCEPTION_MARK;
    ResourceMark rm;

    const char* cname =
         PerfDataManager::counter_name(_gen_counters->name_space(), "threads");
    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None,
                                     ParallelGCThreads, CHECK);
  }
}
#ifdef _MSC_VER
#pragma warning( pop )
#endif

// ParNewGeneration::
ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}

template <class T>
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _par_cl->do_oop_nv(p);

  if (Universe::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}
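
// Informal note on the is_in_reserved(p) guard above: the keep-alive
// closure can be applied to reference slots that live outside the Java
// heap (for example, JNI weak reference handles), and card marking via
// write_ref_field_gc_par() only makes sense for slots that actually lie
// within the heap -- hence the guard tests the slot address "p" rather
// than the referent.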

void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(oop* p)       { ParKeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(narrowOop* p) { ParKeepAliveClosure::do_oop_work(p); }

// ParNewGeneration::
KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {}

template <class T>
void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _cl->do_oop_nv(p);

  if (Universe::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/KeepAliveClosure::do_oop(oop* p)       { KeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); }

template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if ((HeapWord*)obj < _boundary) {
      assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
      oop new_obj = obj->is_forwarded()
                      ? obj->forwardee()
                      : _g->DefNewGeneration::copy_to_survivor_space(obj);
      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
    }
    if (_gc_barrier) {
      // If p points to a younger generation, mark the card.
      if ((HeapWord*)obj < _gen_boundary) {
        _rs->write_ref_field_gc_par(p, obj);
      }
    }
  }
}

void ScanClosureWithParBarrier::do_oop(oop* p)       { ScanClosureWithParBarrier::do_oop_work(p); }
void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }

class ParNewRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
public:
  ParNewRefProcTaskProxy(ProcessTask& task, ParNewGeneration& gen,
                         Generation& next_gen,
                         HeapWord* young_old_boundary,
                         ParScanThreadStateSet& state_set);

private:
  virtual void work(uint worker_id);
  virtual void set_for_termination(int active_workers) {
    _state_set.terminator()->reset_for_reuse(active_workers);
  }
private:
  ParNewGeneration&      _gen;
  ProcessTask&           _task;
  Generation&            _next_gen;
  HeapWord*              _young_old_boundary;
  ParScanThreadStateSet& _state_set;
};

ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(
    ProcessTask& task, ParNewGeneration& gen,
    Generation& next_gen,
    HeapWord* young_old_boundary,
    ParScanThreadStateSet& state_set)
  : AbstractGangTask("ParNewGeneration parallel reference processing"),
    _gen(gen),
    _task(task),
    _next_gen(next_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
{
}

void ParNewRefProcTaskProxy::work(uint worker_id)
{
  ResourceMark rm;
  HandleMark hm;
  ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id);
  par_scan_state.set_young_old_boundary(_young_old_boundary);
  _task.work(worker_id, par_scan_state.is_alive_closure(),
             par_scan_state.keep_alive_closure(),
             par_scan_state.evacuate_followers_closure());
}

class ParNewRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _task;

public:
  ParNewRefEnqueueTaskProxy(EnqueueTask& task)
    : AbstractGangTask("ParNewGeneration parallel reference enqueue"),
      _task(task)
  { }

  virtual void work(uint worker_id)
  {
    _task.work(worker_id);
  }
};


void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "not a generational heap");
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  _state_set.reset(workers->active_workers(), _generation.promotion_failed());
  ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(),
                                 _generation.reserved().end(), _state_set);
  workers->run_task(&rp_task);
  _state_set.reset(0 /* bad value in debug if not reset */,
                   _generation.promotion_failed());
}

void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  ParNewRefEnqueueTaskProxy enq_task(task);
  workers->run_task(&enq_task);
}

void ParNewRefProcTaskExecutor::set_single_threaded_mode()
{
  _state_set.flush();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->set_par_threads(0);  // 0 ==> non-parallel.
  gch->save_marks();
}

ScanClosureWithParBarrier::
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
  ScanClosure(g, gc_barrier) {}

EvacuateFollowersClosureGeneral::
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
                                OopsInGenClosure* cur,
                                OopsInGenClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void EvacuateFollowersClosureGeneral::do_void() {
  do {
    // Beware: this call will lead to closure applications via virtual
    // calls.
    _gch->oop_since_save_marks_iterate(_level,
                                       _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}


bool ParNewGeneration::_avoid_promotion_undo = false;

void ParNewGeneration::adjust_desired_tenuring_threshold() {
  // Set the desired survivor size to half the real survivor space
  _tenuring_threshold =
    age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
}

// A Generation that does parallel young-gen collection.

void ParNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
    "not a CMS generational heap");
  AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need workgang for parallel work");
  int active_workers =
      AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
                                   workers->active_workers(),
                                   Threads::number_of_non_daemon_threads());
  workers->set_active_workers(active_workers);
  _next_gen = gch->next_gen(this);
  assert(_next_gen != NULL,
    "This must be the youngest gen, and not the only gen");
  assert(gch->n_gens() == 2,
         "Par collection currently only works with single older gen.");
  // Do we have to avoid promotion_undo?
  if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) {
    set_avoid_promotion_undo(true);
  }

  // If the next generation is too full to accommodate worst-case promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  if (UseAdaptiveSizePolicy) {
    set_survivor_overflow(false);
    size_policy->minor_collection_begin();
  }

  TraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, gclog_or_tty);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  SpecializationStats::clear();

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->save_marks();
  assert(workers != NULL, "Need parallel worker threads.");
  int n_workers = active_workers;

  // Set the correct parallelism (number of queues) in the reference processor
  ref_processor()->set_active_mt_degree(n_workers);

  // Always set the terminator for the active number of workers
  // because only those workers go through the termination protocol.
  ParallelTaskTerminator _term(n_workers, task_queues());
  ParScanThreadStateSet thread_state_set(workers->active_workers(),
                                         *to(), *this, *_next_gen, *task_queues(),
                                         _overflow_stacks, desired_plab_sz(), _term);

  ParNewGenTask tsk(this, _next_gen, reserved().end(), &thread_state_set);
  gch->set_par_threads(n_workers);
  gch->rem_set()->prepare_for_younger_refs_iterate(true);
  // It turns out that even when we're using 1 thread, doing the work in a
  // separate thread causes wide variance in run times.  We can't help this
  // in the multi-threaded case, but we special-case n=1 here to get
  // repeatable measurements of the 1-thread overhead of the parallel code.
  if (n_workers > 1) {
    GenCollectedHeap::StrongRootsScope srs(gch);
    workers->run_task(&tsk);
  } else {
    GenCollectedHeap::StrongRootsScope srs(gch);
    tsk.work(0);
  }
  thread_state_set.reset(0 /* Bad value in debug if not reset */,
                         promotion_failed());

  // Process (weak) reference objects found during scavenge.
  ReferenceProcessor* rp = ref_processor();
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);
  KeepAliveClosure keep_alive(&scan_weak_ref);
  ScanClosure               scan_without_gc_barrier(this, false);
  ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
  set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
  EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
    &scan_without_gc_barrier, &scan_with_gc_barrier);
  rp->setup_policy(clear_all_soft_refs);
  // Can the mt_degree be set later (at run_task() time would be best)?
  rp->set_active_mt_degree(active_workers);
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &evacuate_followers, &task_executor);
  } else {
    thread_state_set.flush();
    gch->set_par_threads(0);  // 0 ==> non-parallel.
    gch->save_marks();
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &evacuate_followers, NULL);
  }
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a minor collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    size_policy->reset_gc_overhead_limit_count();

    assert(to()->is_empty(), "to space should be empty now");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed)");
    }
    // All the spaces are in play for mark-sweep.
    swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_failed();
    // Inform the next generation that a promotion failure occurred.
    _next_gen->promotion_failure_occurred();

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());

  adjust_desired_tenuring_threshold();
  if (ResizePLAB) {
    plab_stats()->adjust_desired_plab_sz(n_workers);
  }

  if (PrintGC && !PrintGCDetails) {
    gch->print_heap_change(gch_prev_used);
  }

  if (PrintGCDetails && ParallelGCVerbose) {
    TASKQUEUE_STATS_ONLY(thread_state_set.print_termination_stats());
    TASKQUEUE_STATS_ONLY(thread_state_set.print_taskqueue_stats());
  }

  if (UseAdaptiveSizePolicy) {
    size_policy->minor_collection_end(gch->gc_cause());
    size_policy->avg_survived()->sample(from()->used());
  }

  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings and os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  SpecializationStats::print();

  rp->set_enqueuing_is_done(true);
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    rp->enqueue_discovered_references(&task_executor);
  } else {
    rp->enqueue_discovered_references(NULL);
  }
  rp->verify_no_references_recorded();
}

static int sum;
void ParNewGeneration::waste_some_time() {
  for (int i = 0; i < 100; i++) {
    sum += i;
  }
}

static const oop ClaimedForwardPtr = oop(0x4);

// Because of concurrency, there are times where an object for which
// "is_forwarded()" is true contains an "interim" forwarding pointer
// value.  Such a value will soon be overwritten with a real value.
// This method requires "obj" to have a forwarding pointer, and waits,
// if necessary, for a real one to be inserted, and returns it.
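
// Informal sketch of the claim protocol (see
// copy_to_survivor_space_avoiding_promotion_undo() below): a thread that
// wants to promote an object first CASes the sentinel ClaimedForwardPtr
// into the mark word via forward_to_atomic(), so the mark word moves
// through the states
//
//   unforwarded  --CAS-->  ClaimedForwardPtr  -->  real forwardee
//
// Losers of the CAS, and readers that observe the interim value, spin in
// real_forwardee_slow() until the winner installs the real forwarding
// pointer.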

oop ParNewGeneration::real_forwardee(oop obj) {
  oop forward_ptr = obj->forwardee();
  if (forward_ptr != ClaimedForwardPtr) {
    return forward_ptr;
  } else {
    return real_forwardee_slow(obj);
  }
}

oop ParNewGeneration::real_forwardee_slow(oop obj) {
  // Spin-read if it is claimed but not yet written by another thread.
  oop forward_ptr = obj->forwardee();
  while (forward_ptr == ClaimedForwardPtr) {
    waste_some_time();
    assert(obj->is_forwarded(), "precondition");
    forward_ptr = obj->forwardee();
  }
  return forward_ptr;
}

#ifdef ASSERT
bool ParNewGeneration::is_legal_forward_ptr(oop p) {
  return
    (_avoid_promotion_undo && p == ClaimedForwardPtr)
    || Universe::heap()->is_in_reserved(p);
}
#endif

void ParNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    // We should really have separate per-worker stacks, rather
    // than use locking of a common pair of stacks.
    MutexLocker ml(ParGCRareEvent_lock);
    preserve_mark(obj, m);
  }
}

// Multiple GC threads may try to promote an object.  If the object
// is successfully promoted, a forwarding pointer will be installed in
// the object in the young generation.  This method claims the right
// to install the forwarding pointer before it copies the object,
// thus avoiding the need to undo the copy as in
// copy_to_survivor_space_with_undo.

oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo(
        ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
  // In the sequential version, this assert also says that the object is
  // not forwarded.  That might not be the case here.  It is the case that
  // the caller observed it to be not forwarded at some time in the past.
  assert(is_in_reserved(old), "shouldn't be scavenging this oop");

  // The sequential code read "old->age()" below.  That doesn't work here,
  // since the age is in the mark word, and that might be overwritten with
  // a forwarding pointer by a parallel thread.  So we must save the mark
  // word in a local and then analyze it.
  oopDesc dummyOld;
  dummyOld.set_mark(m);
  assert(!dummyOld.is_forwarded(),
         "should not be called with forwarding pointer mark word.");

  oop new_obj = NULL;
  oop forward_ptr;

  // Try allocating obj in to-space (unless too old)
  if (dummyOld.age() < tenuring_threshold()) {
    new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
    if (new_obj == NULL) {
      set_survivor_overflow(true);
    }
  }

  if (new_obj == NULL) {
    // Either to-space is full or we decided to promote
    // try allocating obj tenured

    // Attempt to install a null forwarding pointer (atomically),
    // to claim the right to install the real forwarding pointer.
    forward_ptr = old->forward_to_atomic(ClaimedForwardPtr);
    if (forward_ptr != NULL) {
      // someone else beat us to it.
      return real_forwardee(old);
    }

    new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
                                       old, m, sz);

    if (new_obj == NULL) {
      // promotion failed, forward to self
      _promotion_failed = true;
      new_obj = old;

      preserve_mark_if_necessary(old, m);
      // Log the size of the maiden promotion failure
      par_scan_state->log_promotion_failure(sz);
    }

    old->forward_to(new_obj);
    forward_ptr = NULL;
  } else {
    // Is in to-space; do copying ourselves.
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
    forward_ptr = old->forward_to_atomic(new_obj);
    // Restore the mark word copied above.
    new_obj->set_mark(m);
    // Increment age if obj still in new generation
    new_obj->incr_age();
    par_scan_state->age_table()->add(new_obj, sz);
  }
  assert(new_obj != NULL, "just checking");

#ifndef PRODUCT
  // This code must come after the CAS test, or it will print incorrect
  // information.
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
       is_in_reserved(new_obj) ? "copying" : "tenuring",
       new_obj->klass()->internal_name(), old, new_obj, new_obj->size());
  }
#endif

  if (forward_ptr == NULL) {
    oop obj_to_push = new_obj;
    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
      // Length field used as index of next element to be scanned.
      // Real length can be obtained from real_forwardee()
      arrayOop(old)->set_length(0);
      obj_to_push = old;
      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
             "push forwarded object");
    }
    // Push it on one of the queues of to-be-scanned objects.
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
      // Add stats for overflow pushes.
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("queue overflow!\n");
      }
      push_on_overflow_list(old, par_scan_state);
      TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
    }

    return new_obj;
  }

  // Oops.  Someone beat us to it.  Undo the allocation.  Where did we
  // allocate it?
  if (is_in_reserved(new_obj)) {
    // Must be in to_space.
    assert(to()->is_in_reserved(new_obj), "Checking");
    if (forward_ptr == ClaimedForwardPtr) {
      // Wait to get the real forwarding pointer value.
      forward_ptr = real_forwardee(old);
    }
    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
  }

  return forward_ptr;
}
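
// Informal comparison of the two copy_to_survivor_space variants: the
// "avoiding promotion undo" version above claims the forwarding pointer
// (via ClaimedForwardPtr) *before* promoting into the old generation, so a
// losing thread never has to retract an old-gen allocation; collect()
// selects it by calling set_avoid_promotion_undo(true) when running under
// the CMS collector policy.  The version below copies first and CASes the
// forwarding pointer afterwards, undoing the copy if another thread won
// the race.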

// Multiple GC threads may try to promote the same object.  If two
// or more GC threads copy the object, only one wins the race to install
// the forwarding pointer.  The other threads have to undo their copy.

oop ParNewGeneration::copy_to_survivor_space_with_undo(
        ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {

  // In the sequential version, this assert also says that the object is
  // not forwarded.  That might not be the case here.  It is the case that
  // the caller observed it to be not forwarded at some time in the past.
  assert(is_in_reserved(old), "shouldn't be scavenging this oop");

  // The sequential code read "old->age()" below.  That doesn't work here,
  // since the age is in the mark word, and that might be overwritten with
  // a forwarding pointer by a parallel thread.  So we must save the mark
  // word here, install it in a local oopDesc, and then analyze it.
  oopDesc dummyOld;
  dummyOld.set_mark(m);
  assert(!dummyOld.is_forwarded(),
         "should not be called with forwarding pointer mark word.");

  bool failed_to_promote = false;
  oop new_obj = NULL;
  oop forward_ptr;

  // Try allocating obj in to-space (unless too old)
  if (dummyOld.age() < tenuring_threshold()) {
    new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
    if (new_obj == NULL) {
      set_survivor_overflow(true);
    }
  }

  if (new_obj == NULL) {
    // Either to-space is full or we decided to promote
    // try allocating obj tenured
    new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
                                       old, m, sz);

    if (new_obj == NULL) {
      // promotion failed, forward to self
      forward_ptr = old->forward_to_atomic(old);
      new_obj = old;

      if (forward_ptr != NULL) {
        return forward_ptr;   // someone else succeeded
      }

      _promotion_failed = true;
      failed_to_promote = true;

      preserve_mark_if_necessary(old, m);
      // Log the size of the maiden promotion failure
      par_scan_state->log_promotion_failure(sz);
    }
  } else {
    // Is in to-space; do copying ourselves.
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
    // Restore the mark word copied above.
    new_obj->set_mark(m);
    // Increment age if new_obj still in new generation
    new_obj->incr_age();
    par_scan_state->age_table()->add(new_obj, sz);
  }
  assert(new_obj != NULL, "just checking");

#ifndef PRODUCT
  // This code must come after the CAS test, or it will print incorrect
  // information.
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
       is_in_reserved(new_obj) ? "copying" : "tenuring",
       new_obj->klass()->internal_name(), old, new_obj, new_obj->size());
  }
#endif

  // Now attempt to install the forwarding pointer (atomically).
  // We have to copy the mark word before overwriting with forwarding
  // ptr, so we can restore it below in the copy.
  if (!failed_to_promote) {
    forward_ptr = old->forward_to_atomic(new_obj);
  }

  if (forward_ptr == NULL) {
    oop obj_to_push = new_obj;
    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
      // Length field used as index of next element to be scanned.
      // Real length can be obtained from real_forwardee()
      arrayOop(old)->set_length(0);
      obj_to_push = old;
      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
             "push forwarded object");
    }
    // Push it on one of the queues of to-be-scanned objects.
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
      // Add stats for overflow pushes.
      push_on_overflow_list(old, par_scan_state);
      TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
    }

    return new_obj;
  }

  // Oops.  Someone beat us to it.  Undo the allocation.  Where did we
  // allocate it?
  if (is_in_reserved(new_obj)) {
    // Must be in to_space.
    assert(to()->is_in_reserved(new_obj), "Checking");
    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
  } else {
    assert(!_avoid_promotion_undo, "Should not be here if avoiding.");
    _next_gen->par_promote_alloc_undo(par_scan_state->thread_num(),
                                      (HeapWord*)new_obj, sz);
  }

  return forward_ptr;
}

#ifndef PRODUCT
// It's OK to call this multi-threaded;  the worst thing
// that can happen is that we'll get a bunch of closely
// spaced simulated overflows, but that's OK, in fact
// probably good as it would exercise the overflow code
// under contention.
bool ParNewGeneration::should_simulate_overflow() {
  if (_overflow_counter-- <= 0) { // just being defensive
    _overflow_counter = ParGCWorkQueueOverflowInterval;
    return true;
  } else {
    return false;
  }
}
#endif

// In case we are using compressed oops, we need to be careful.
// If the object being pushed is an object array, then its length
// field keeps track of the "grey boundary" at which the next
// incremental scan will be done (see ParGCArrayScanChunk).
// When using compressed oops, this length field is kept in the
// lower 32 bits of the erstwhile klass word and cannot be used
// for the overflow chaining pointer (OCP below).  As such the OCP
// would itself need to be compressed into the top 32-bits in this
// case.  Unfortunately, see below, in the event that we have a
// promotion failure, the node to be pushed on the list can be
// outside of the Java heap, so the heap-based pointer compression
// would not work (we would have potential aliasing between C-heap
// and Java-heap pointers).  For this reason, when using compressed
// oops, we simply use a worker-thread-local, non-shared overflow
// list in the form of a growable array, with a slightly different
// overflow stack draining strategy.  If/when we start using fat
// stacks here, we can go back to using (fat) pointer chains
// (although some performance comparisons would be useful since
// single global lists have their own performance disadvantages
// as we were made painfully aware not long ago, see 6786503).
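//
// Informal sketch of the global (non-compressed-oops) overflow list: it is
// a lock-free LIFO threaded through the klass words of the from-space
// copies.  To push, a thread points the object's klass word at the current
// head and CASes the object in as the new head, retrying if the head moved
// in the meantime.  The sentinel BUSY marks the list as claimed by a
// thread that is taking a prefix; pushers treat BUSY like any other head
// value, while takers back off and retry.  A self-forwarded object (a
// promotion failure) cannot spare its klass word, so a dummy oopDesc is
// allocated in the C heap to act as its list node instead.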
#define BUSY (oop(0x1aff1aff))
void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) {
  assert(is_in_reserved(from_space_obj), "Should be from this generation");
  if (ParGCUseLocalOverflow) {
    // In the case of compressed oops, we use a private, not-shared
    // overflow stack.
    par_scan_state->push_on_overflow_stack(from_space_obj);
  } else {
    assert(!UseCompressedOops, "Error");
    // if the object has been forwarded to itself, then we cannot
    // use the klass pointer for the linked list.  Instead we have
    // to allocate an oopDesc in the C-Heap and use that for the linked list.
    // XXX This is horribly inefficient when a promotion failure occurs
    // and should be fixed. XXX FIX ME !!!
#ifndef PRODUCT
    Atomic::inc_ptr(&_num_par_pushes);
    assert(_num_par_pushes > 0, "Tautology");
#endif
    if (from_space_obj->forwardee() == from_space_obj) {
      oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1, mtGC);
      listhead->forward_to(from_space_obj);
      from_space_obj = listhead;
    }
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list;
    do {
      cur_overflow_list = observed_overflow_list;
      if (cur_overflow_list != BUSY) {
        from_space_obj->set_klass_to_list_ptr(cur_overflow_list);
      } else {
        from_space_obj->set_klass_to_list_ptr(NULL);
      }
      observed_overflow_list =
        (oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list);
    } while (cur_overflow_list != observed_overflow_list);
  }
}

bool ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) {
  bool res;

  if (ParGCUseLocalOverflow) {
    res = par_scan_state->take_from_overflow_stack();
  } else {
    assert(!UseCompressedOops, "Error");
    res = take_from_overflow_list_work(par_scan_state);
  }
  return res;
}


// *NOTE*: The overflow list manipulation code here and
// in CMSCollector:: are very similar in shape,
// except that in the CMS case we thread the objects
// directly into the list via their mark word, and do
// not need to deal with special cases below related
// to chunking of object arrays and promotion failure
// handling.
// CR 6797058 has been filed to attempt consolidation of
// the common code.
// Because of the common code, if you make any changes in
// the code below, please check the CMS version to see if
// similar changes might be needed.
// See CMSCollector::par_take_from_overflow_list() for
// more extensive documentation comments.
bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan_state) {
  ObjToScanQueue* work_q = par_scan_state->work_queue();
  // How many to take?
  size_t objsFromOverflow = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
                                 (size_t)ParGCDesiredObjsFromOverflowList);

  assert(!UseCompressedOops, "Error");
  assert(par_scan_state->overflow_stack() == NULL, "Error");
  if (_overflow_list == NULL) return false;

  // Otherwise, there was something there; try claiming the list.
  oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
  // Trim off a prefix of at most objsFromOverflow items
  Thread* tid = Thread::current();
  size_t spin_count = (size_t)ParallelGCThreads;
  size_t sleep_time_millis = MAX2((size_t)1, objsFromOverflow/100);
  for (size_t spin = 0; prefix == BUSY && spin < spin_count; spin++) {
    // someone grabbed it before we did ...
    // ... we spin for a short while...
    os::sleep(tid, sleep_time_millis, false);
    if (_overflow_list == NULL) {
      // nothing left to take
      return false;
    } else if (_overflow_list != BUSY) {
      // try and grab the prefix
      prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
    }
  }
  if (prefix == NULL || prefix == BUSY) {
    // Nothing to take or waited long enough
    if (prefix == NULL) {
      // Write back the NULL in case we overwrote it with BUSY above
      // and it is still the same value.
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
    return false;
  }
  assert(prefix != NULL && prefix != BUSY, "Error");
  size_t i = 1;
  oop cur = prefix;
  while (i < objsFromOverflow && cur->klass_or_null() != NULL) {
    i++; cur = cur->list_ptr_from_klass();
  }

  // Reattach remaining (suffix) to overflow list
  if (cur->klass_or_null() == NULL) {
    // Write back the NULL in lieu of the BUSY we wrote
    // above, if it is still the same value.
    if (_overflow_list == BUSY) {
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
  } else {
    assert(cur->klass_or_null() != (Klass*)(address)BUSY, "Error");
    oop suffix = cur->list_ptr_from_klass();  // suffix will be put back on global list
    cur->set_klass_to_list_ptr(NULL);         // break off suffix
    // It's possible that the list is still in the empty(busy) state
    // we left it in a short while ago; in that case we may be
    // able to place back the suffix.
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list = observed_overflow_list;
    bool attached = false;
    while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
      observed_overflow_list =
        (oop) Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
      if (cur_overflow_list == observed_overflow_list) {
        attached = true;
        break;
      } else cur_overflow_list = observed_overflow_list;
    }
    if (!attached) {
      // Too bad, someone else got in in between; we'll need to do a splice.
      // Find the last item of suffix list
      oop last = suffix;
      while (last->klass_or_null() != NULL) {
        last = last->list_ptr_from_klass();
      }
      // Atomically prepend suffix to current overflow list
      observed_overflow_list = _overflow_list;
      do {
        cur_overflow_list = observed_overflow_list;
        if (cur_overflow_list != BUSY) {
          // Do the splice ...
          last->set_klass_to_list_ptr(cur_overflow_list);
        } else { // cur_overflow_list == BUSY
          last->set_klass_to_list_ptr(NULL);
        }
        observed_overflow_list =
          (oop)Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
      } while (cur_overflow_list != observed_overflow_list);
    }
  }
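
  // Informal walkthrough of the take path above: a taker atomically swaps
  // BUSY into _overflow_list, claiming the whole chain.  It then walks at
  // most objsFromOverflow nodes to form its prefix.  If a suffix remains,
  // the taker first tries to CAS it straight back over the BUSY/NULL it
  // expects to still find at the head; if some other thread pushed in the
  // meantime, it instead walks to the end of the suffix and splices the
  // suffix in front of whatever the new head is.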

  // Push objects on prefix list onto this thread's work queue
  assert(prefix != NULL && prefix != BUSY, "program logic");
  cur = prefix;
  ssize_t n = 0;
  while (cur != NULL) {
    oop obj_to_push = cur->forwardee();
    oop next        = cur->list_ptr_from_klass();
    cur->set_klass(obj_to_push->klass());
    // This may be an array object that is self-forwarded.  In that case, the list pointer
    // space, cur, is not in the Java heap, but rather in the C-heap and should be freed.
    if (!is_in_reserved(cur)) {
      // This can become a scaling bottleneck when there is work queue overflow coincident
      // with promotion failure.
      oopDesc* f = cur;
      FREE_C_HEAP_ARRAY(oopDesc, f, mtGC);
    } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = work_q->push(obj_to_push);
    assert(ok, "Should have succeeded");
    cur = next;
    n++;
  }
  TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n));
#ifndef PRODUCT
  assert(_num_par_pushes >= n, "Too many pops?");
  Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
#endif
  return true;
}
#undef BUSY

void ParNewGeneration::ref_processor_init()
{
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor =
      new ReferenceProcessor(_reserved,                  // span
                             ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
                             (int) ParallelGCThreads,    // mt processing degree
                             refs_discovery_is_mt(),     // mt discovery
                             (int) ParallelGCThreads,    // mt discovery degree
                             refs_discovery_is_atomic(), // atomic_discovery
                             NULL,                       // is_alive_non_header
                             false);                     // write barrier for next field updates
  }
}

const char* ParNewGeneration::name() const {
  return "par new generation";
}