/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
#include "gc_implementation/parNew/parGCAllocBuffer.hpp"
#include "gc_implementation/parNew/parNewGeneration.hpp"
#include "gc_implementation/parNew/parOopClosures.inline.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "gc_implementation/shared/ageTable.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/defNewGeneration.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.hpp"
#include "memory/generation.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/thread.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/workgroup.hpp"

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParScanThreadState::ParScanThreadState(Space* to_space_,
                                       ParNewGeneration* gen_,
                                       Generation* old_gen_,
                                       int thread_num_,
                                       ObjToScanQueueSet* work_queue_set_,
                                       GrowableArray<oop>** overflow_stack_set_,
                                       size_t desired_plab_sz_,
                                       ParallelTaskTerminator& term_) :
  _to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_),
  _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
  _overflow_stack(overflow_stack_set_[thread_num_]),
  _ageTable(false), // false ==> not the global age table, no perf data.
  _to_space_alloc_buffer(desired_plab_sz_),
  _to_space_closure(gen_, this), _old_gen_closure(gen_, this),
  _to_space_root_closure(gen_, this), _old_gen_root_closure(gen_, this),
  _older_gen_closure(gen_, this),
  _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
                      &_to_space_root_closure, gen_, &_old_gen_root_closure,
                      work_queue_set_, &term_),
  _is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this),
  _keep_alive_closure(&_scan_weak_ref_closure),
  _promotion_failure_size(0),
  _strong_roots_time(0.0), _term_time(0.0)
{
#if TASKQUEUE_STATS
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
#endif // TASKQUEUE_STATS

  _survivor_chunk_array =
    (ChunkArray*) old_gen()->get_data_recorder(thread_num());
  _hash_seed = 17;  // Might want to take time-based random value.
  _start = os::elapsedTime();
  _old_gen_closure.set_generation(old_gen_);
  _old_gen_root_closure.set_generation(old_gen_);
}
#ifdef _MSC_VER
#pragma warning( pop )
#endif

void ParScanThreadState::record_survivor_plab(HeapWord* plab_start,
                                              size_t plab_word_size) {
  ChunkArray* sca = survivor_chunk_array();
  if (sca != NULL) {
    // A non-null SCA implies that we want the PLAB data recorded.
    sca->record_sample(plab_start, plab_word_size);
  }
}

bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) const {
  return new_obj->is_objArray() &&
         arrayOop(new_obj)->length() > ParGCArrayScanChunk &&
         new_obj != old_obj;
}

void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
  assert(old->is_objArray(), "must be obj array");
  assert(old->is_forwarded(), "must be forwarded");
  assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
  assert(!old_gen()->is_in(old), "must be in young generation.");

  objArrayOop obj = objArrayOop(old->forwardee());
  // Process ParGCArrayScanChunk elements now
  // and push the remainder back onto queue
  int start = arrayOop(old)->length();
  int end = obj->length();
  int remainder = end - start;
  assert(start <= end, "just checking");
  if (remainder > 2 * ParGCArrayScanChunk) {
    // Test above combines last partial chunk with a full chunk
    end = start + ParGCArrayScanChunk;
    arrayOop(old)->set_length(end);
    // Push remainder.
    bool ok = work_queue()->push(old);
    assert(ok, "just popped, push must be okay");
  } else {
    // Restore length so that it can be used if there
    // is a promotion failure and forwarding pointers
    // must be removed.
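    // A worked example of the chunking arithmetic in this if/else
    // (illustrative only; assumes the default ParGCArrayScanChunk of 50):
    // with start == 50 and a real length end == 170, remainder is 120,
    // which exceeds 2 * 50, so elements [50, 100) are scanned now and the
    // array is pushed back with its length set to 100.  With end == 140,
    // remainder is 90 <= 100, so the length is restored to 140 and the
    // final (combined) chunk of 90 elements is scanned in one step below.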
    arrayOop(old)->set_length(end);
  }

  // process our set of indices (include header in first chunk)
  // should make sure end is even (aligned to HeapWord in case of compressed oops)
  if ((HeapWord *)obj < young_old_boundary()) {
    // object is in to_space
    obj->oop_iterate_range(&_to_space_closure, start, end);
  } else {
    // object is in old generation
    obj->oop_iterate_range(&_old_gen_closure, start, end);
  }
}


void ParScanThreadState::trim_queues(int max_size) {
  ObjToScanQueue* queue = work_queue();
  do {
    while (queue->size() > (juint)max_size) {
      oop obj_to_scan;
      if (queue->pop_local(obj_to_scan)) {
        if ((HeapWord *)obj_to_scan < young_old_boundary()) {
          if (obj_to_scan->is_objArray() &&
              obj_to_scan->is_forwarded() &&
              obj_to_scan->forwardee() != obj_to_scan) {
            scan_partial_array_and_push_remainder(obj_to_scan);
          } else {
            // object is in to_space
            obj_to_scan->oop_iterate(&_to_space_closure);
          }
        } else {
          // object is in old generation
          obj_to_scan->oop_iterate(&_old_gen_closure);
        }
      }
    }
    // For the case of compressed oops, we have a private, non-shared
    // overflow stack, so we eagerly drain it so as to more evenly
    // distribute load early. Note: this may be good to do in
    // general rather than delay for the final stealing phase.
    // If applicable, we'll transfer a set of objects over to our
    // work queue, allowing them to be stolen and draining our
    // private overflow stack.
  } while (ParGCTrimOverflow && young_gen()->take_from_overflow_list(this));
}

bool ParScanThreadState::take_from_overflow_stack() {
  assert(ParGCUseLocalOverflow, "Else should not call");
  assert(young_gen()->overflow_list() == NULL, "Error");
  ObjToScanQueue* queue = work_queue();
  GrowableArray<oop>* of_stack = overflow_stack();
  uint num_overflow_elems = of_stack->length();
  uint num_take_elems = MIN2(MIN2((queue->max_elems() - queue->size())/4,
                                  (juint)ParGCDesiredObjsFromOverflowList),
                             num_overflow_elems);
  // Transfer the most recent num_take_elems from the overflow
  // stack to our work queue.
  for (size_t i = 0; i != num_take_elems; i++) {
    oop cur = of_stack->pop();
    oop obj_to_push = cur->forwardee();
    assert(Universe::heap()->is_in_reserved(cur), "Should be in heap");
    assert(!old_gen()->is_in_reserved(cur), "Should be in young gen");
    assert(Universe::heap()->is_in_reserved(obj_to_push), "Should be in heap");
    if (should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = queue->push(obj_to_push);
    assert(ok, "Should have succeeded");
  }
  assert(young_gen()->overflow_list() == NULL, "Error");
  return num_take_elems > 0;  // was something transferred?
}

void ParScanThreadState::push_on_overflow_stack(oop p) {
  assert(ParGCUseLocalOverflow, "Else should not call");
  overflow_stack()->push(p);
  assert(young_gen()->overflow_list() == NULL, "Error");
}

HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {

  // If the object is small enough, try to reallocate the
  // buffer.
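  // For orientation (an illustrative note, assuming the default
  // ParallelGCBufferWastePct of 10): the test below treats a request as
  // "small" when word_sz is less than ParallelGCBufferWastePct percent of
  // the PLAB size, i.e. under 10% of plab->word_sz() with the default.
  // Small requests justify retiring the nearly-full buffer and grabbing a
  // fresh one; anything larger is allocated directly from to-space.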
  HeapWord* obj = NULL;
  if (!_to_space_full) {
    ParGCAllocBuffer* const plab = to_space_alloc_buffer();
    Space* const sp = to_space();
    if (word_sz * 100 <
        ParallelGCBufferWastePct * plab->word_sz()) {
      // Is small enough; abandon this buffer and start a new one.
      plab->retire(false, false);
      size_t buf_size = plab->word_sz();
      HeapWord* buf_space = sp->par_allocate(buf_size);
      if (buf_space == NULL) {
        const size_t min_bytes =
          ParGCAllocBuffer::min_size() << LogHeapWordSize;
        size_t free_bytes = sp->free();
        while (buf_space == NULL && free_bytes >= min_bytes) {
          buf_size = free_bytes >> LogHeapWordSize;
          assert(buf_size == (size_t)align_object_size(buf_size),
                 "Invariant");
          buf_space = sp->par_allocate(buf_size);
          free_bytes = sp->free();
        }
      }
      if (buf_space != NULL) {
        plab->set_word_size(buf_size);
        plab->set_buf(buf_space);
        record_survivor_plab(buf_space, buf_size);
        obj = plab->allocate(word_sz);
        // Note that we cannot compare buf_size < word_sz below
        // because of AlignmentReserve (see ParGCAllocBuffer::allocate()).
        assert(obj != NULL || plab->words_remaining() < word_sz,
               "Else should have been able to allocate");
        // It's conceivable that we may be able to use the
        // buffer we just grabbed for subsequent small requests
        // even if not for this one.
      } else {
        // We're used up.
        _to_space_full = true;
      }

    } else {
      // Too large; allocate the object individually.
      obj = sp->par_allocate(word_sz);
    }
  }
  return obj;
}


void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj,
                                                size_t word_sz) {
  // Is the alloc in the current alloc buffer?
  if (to_space_alloc_buffer()->contains(obj)) {
    assert(to_space_alloc_buffer()->contains(obj + word_sz - 1),
           "Should contain whole object.");
    to_space_alloc_buffer()->undo_allocation(obj, word_sz);
  } else {
    CollectedHeap::fill_with_object(obj, word_sz);
  }
}

void ParScanThreadState::print_and_clear_promotion_failure_size() {
  if (_promotion_failure_size != 0) {
    if (PrintPromotionFailure) {
      gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ",
        _thread_num, _promotion_failure_size);
    }
    _promotion_failure_size = 0;
  }
}

class ParScanThreadStateSet: private ResourceArray {
public:
  // Initializes states for the specified number of threads.
  ParScanThreadStateSet(int num_threads,
                        Space& to_space,
                        ParNewGeneration& gen,
                        Generation& old_gen,
                        ObjToScanQueueSet& queue_set,
                        GrowableArray<oop>** overflow_stacks_,
                        size_t desired_plab_sz,
                        ParallelTaskTerminator& term);

  ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }

  inline ParScanThreadState& thread_state(int i);

  void reset(bool promotion_failed);
  void flush();

#if TASKQUEUE_STATS
  static void
    print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_termination_stats(outputStream* const st = gclog_or_tty);
  static void
    print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_taskqueue_stats(outputStream* const st = gclog_or_tty);
  void reset_stats();
#endif // TASKQUEUE_STATS

private:
  ParallelTaskTerminator& _term;
  ParNewGeneration&       _gen;
  Generation&             _next_gen;
};


ParScanThreadStateSet::ParScanThreadStateSet(
  int num_threads, Space& to_space,
  ParNewGeneration& gen, Generation& old_gen, ObjToScanQueueSet& queue_set,
  GrowableArray<oop>** overflow_stack_set_,
  size_t desired_plab_sz, ParallelTaskTerminator& term)
  : ResourceArray(sizeof(ParScanThreadState), num_threads),
    _gen(gen), _next_gen(old_gen), _term(term)
{
  assert(num_threads > 0, "sanity check!");
  // Initialize states.
  for (int i = 0; i < num_threads; ++i) {
    new ((ParScanThreadState*)_data + i)
        ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
                           overflow_stack_set_, desired_plab_sz, term);
  }
}

inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
{
  assert(i >= 0 && i < length(), "sanity check!");
  return ((ParScanThreadState*)_data)[i];
}


void ParScanThreadStateSet::reset(bool promotion_failed)
{
  _term.reset_for_reuse();
  if (promotion_failed) {
    for (int i = 0; i < length(); ++i) {
      thread_state(i).print_and_clear_promotion_failure_size();
    }
  }
}

#if TASKQUEUE_STATS
void
ParScanThreadState::reset_stats()
{
  taskqueue_stats().reset();
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
}

void ParScanThreadStateSet::reset_stats()
{
  for (int i = 0; i < length(); ++i) {
    thread_state(i).reset_stats();
  }
}

void
ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st)
{
  st->print_raw_cr("GC Termination Stats");
  st->print_raw_cr("     elapsed  --strong roots-- "
                   "-------termination-------");
  st->print_raw_cr("thr     ms        ms       %   "
                   "    ms       %   attempts");
  st->print_raw_cr("--- --------- --------- ------ "
                   "--------- ------ --------");
}

void ParScanThreadStateSet::print_termination_stats(outputStream* const st)
{
  print_termination_stats_hdr(st);

  for (int i = 0; i < length(); ++i) {
    const ParScanThreadState & pss = thread_state(i);
    const double elapsed_ms = pss.elapsed_time() * 1000.0;
    const double s_roots_ms = pss.strong_roots_time() * 1000.0;
    const double term_ms = pss.term_time() * 1000.0;
    st->print_cr("%3d %9.2f %9.2f %6.2f "
                 "%9.2f %6.2f " SIZE_FORMAT_W(8),
                 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
                 term_ms, term_ms * 100 / elapsed_ms, pss.term_attempts());
  }
}

// Print stats related to work queue activity.
void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st)
{
  st->print_raw_cr("GC Task Stats");
  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
}

void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st)
{
  print_taskqueue_stats_hdr(st);

  TaskQueueStats totals;
  for (int i = 0; i < length(); ++i) {
    const ParScanThreadState & pss = thread_state(i);
    const TaskQueueStats & stats = pss.taskqueue_stats();
    st->print("%3d ", i); stats.print(st); st->cr();
    totals += stats;

    if (pss.overflow_refills() > 0) {
      st->print_cr("    " SIZE_FORMAT_W(10) " overflow refills "
                   SIZE_FORMAT_W(10) " overflow objects",
                   pss.overflow_refills(), pss.overflow_refill_objs());
    }
  }
  st->print("tot "); totals.print(st); st->cr();

  DEBUG_ONLY(totals.verify());
}
#endif // TASKQUEUE_STATS

void ParScanThreadStateSet::flush()
{
  // Work in this loop should be kept as lightweight as
  // possible since this might otherwise become a bottleneck
  // to scaling. Should we add heavy-weight work into this
  // loop, consider parallelizing the loop into the worker threads.
  for (int i = 0; i < length(); ++i) {
    ParScanThreadState& par_scan_state = thread_state(i);

    // Flush stats related to To-space PLAB activity and
    // retire the last buffer.
    par_scan_state.to_space_alloc_buffer()->
      flush_stats_and_retire(_gen.plab_stats(),
                             false /* !retain */);

    // Every thread has its own age table.  We need to merge
    // them all into one.
    ageTable *local_table = par_scan_state.age_table();
    _gen.age_table()->merge(local_table);

    // Inform old gen that we're done.
    _next_gen.par_promote_alloc_done(i);
    _next_gen.par_oop_since_save_marks_iterate_done(i);
  }

  if (UseConcMarkSweepGC && ParallelGCThreads > 0) {
    // We need to call this even when ResizeOldPLAB is disabled
    // so as to avoid breaking some asserts. While we may be able
    // to avoid this by reorganizing the code a bit, I am loath
    // to do that unless we find cases where ergo leads to bad
    // performance.
    CFLS_LAB::compute_desired_plab_size();
  }
}

ParScanClosure::ParScanClosure(ParNewGeneration* g,
                               ParScanThreadState* par_scan_state) :
  OopsInGenClosure(g), _par_scan_state(par_scan_state), _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ParScanWithBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, false); }
void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }

void ParScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, false); }
void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }

void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, true); }
void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }

void ParRootScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, true); }
void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }

ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
                                             ParScanThreadState* par_scan_state)
  : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
{}

void ParScanWeakRefClosure::do_oop(oop* p)       { ParScanWeakRefClosure::do_oop_work(p); }
void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }

#ifdef WIN32
#pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */
#endif

ParEvacuateFollowersClosure::ParEvacuateFollowersClosure(
    ParScanThreadState* par_scan_state_,
    ParScanWithoutBarrierClosure* to_space_closure_,
    ParScanWithBarrierClosure* old_gen_closure_,
    ParRootScanWithoutBarrierClosure* to_space_root_closure_,
    ParNewGeneration* par_gen_,
    ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_,
    ObjToScanQueueSet* task_queues_,
    ParallelTaskTerminator* terminator_) :

    _par_scan_state(par_scan_state_),
    _to_space_closure(to_space_closure_),
    _old_gen_closure(old_gen_closure_),
    _to_space_root_closure(to_space_root_closure_),
    _old_gen_root_closure(old_gen_root_closure_),
    _par_gen(par_gen_),
    _task_queues(task_queues_),
    _terminator(terminator_)
{}

void ParEvacuateFollowersClosure::do_void() {
  ObjToScanQueue* work_q = par_scan_state()->work_queue();

  while (true) {

    // Scan to-space and old-gen objs until we run out of both.
    oop obj_to_scan;
    par_scan_state()->trim_queues(0);

    // We have no local work, attempt to steal from other threads.

    // attempt to steal work from promoted.
    if (task_queues()->steal(par_scan_state()->thread_num(),
                             par_scan_state()->hash_seed(),
                             obj_to_scan)) {
      bool res = work_q->push(obj_to_scan);
      assert(res, "Empty queue should have room for a push.");

      // if successful, goto Start.
      continue;

      // try global overflow list.
    } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
      continue;
    }

    // Otherwise, offer termination.
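    // (Termination here is cooperative: offer_termination() returns true
    // only once every worker has offered termination and no stealable
    // work appears to remain; otherwise this worker re-enters the
    // trim/steal loop above.)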
    par_scan_state()->start_term_time();
    if (terminator()->offer_termination()) break;
    par_scan_state()->end_term_time();
  }
  assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
         "Broken overflow list?");
  // Finish the last termination pause.
  par_scan_state()->end_term_time();
}

ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* next_gen,
                             HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) :
    AbstractGangTask("ParNewGeneration collection"),
    _gen(gen), _next_gen(next_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
{}

void ParNewGenTask::work(int i) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  // Since this is being done in a separate thread, need new resource
  // and handle marks.
  ResourceMark rm;
  HandleMark hm;
  // We would need multiple old-gen queues otherwise.
  assert(gch->n_gens() == 2, "Par young collection currently only works with one older gen.");

  Generation* old_gen = gch->next_gen(_gen);

  ParScanThreadState& par_scan_state = _state_set->thread_state(i);
  par_scan_state.set_young_old_boundary(_young_old_boundary);

  par_scan_state.start_strong_roots();
  gch->gen_process_strong_roots(_gen->level(),
                                true,  // Process younger gens, if any,
                                       // as strong roots.
                                false, // no scope; this is parallel code
                                false, // not collecting perm generation.
                                SharedHeap::SO_AllClasses,
                                &par_scan_state.to_space_root_closure(),
                                true,  // walk *all* scavengable nmethods
                                &par_scan_state.older_gen_closure());
  par_scan_state.end_strong_roots();

  // "evacuate followers".
  par_scan_state.evacuate_followers_closure().do_void();
}

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParNewGeneration::
ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
  : DefNewGeneration(rs, initial_byte_size, level, "PCopy"),
  _overflow_list(NULL),
  _is_alive_closure(this),
  _plab_stats(YoungPLABSize, PLABWeight)
{
  NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
  NOT_PRODUCT(_num_par_pushes = 0;)
  _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
  guarantee(_task_queues != NULL, "task_queues allocation failure.");

  for (uint i1 = 0; i1 < ParallelGCThreads; i1++) {
    ObjToScanQueue *q = new ObjToScanQueue();
    guarantee(q != NULL, "work_queue Allocation failure.");
    _task_queues->register_queue(i1, q);
  }

  for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
    _task_queues->queue(i2)->initialize();

  _overflow_stacks = NEW_C_HEAP_ARRAY(GrowableArray<oop>*, ParallelGCThreads);
  guarantee(_overflow_stacks != NULL, "Overflow stack set allocation failure");
  for (uint i = 0; i < ParallelGCThreads; i++) {
    if (ParGCUseLocalOverflow) {
      _overflow_stacks[i] = new (ResourceObj::C_HEAP) GrowableArray<oop>(512, true);
      guarantee(_overflow_stacks[i] != NULL, "Overflow Stack allocation failure.");
    } else {
      _overflow_stacks[i] = NULL;
    }
  }

  if (UsePerfData) {
    EXCEPTION_MARK;
    ResourceMark rm;

    const char* cname =
         PerfDataManager::counter_name(_gen_counters->name_space(), "threads");
    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None,
                                     ParallelGCThreads, CHECK);
  }
}
#ifdef _MSC_VER
#pragma warning( pop )
#endif

// ParNewGeneration::
ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}

template <class T>
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _par_cl->do_oop_nv(p);

  if (Universe::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(oop* p)       { ParKeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(narrowOop* p) { ParKeepAliveClosure::do_oop_work(p); }

// ParNewGeneration::
KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {}

template <class T>
void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _cl->do_oop_nv(p);

  if (Universe::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/KeepAliveClosure::do_oop(oop* p)       { KeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); }

template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if ((HeapWord*)obj < _boundary) {
      assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
      oop new_obj = obj->is_forwarded()
                      ? obj->forwardee()
                      : _g->DefNewGeneration::copy_to_survivor_space(obj);
      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
    }
    if (_gc_barrier) {
      // If p points to a younger generation, mark the card.
      if ((HeapWord*)obj < _gen_boundary) {
        _rs->write_ref_field_gc_par(p, obj);
      }
    }
  }
}

void ScanClosureWithParBarrier::do_oop(oop* p)       { ScanClosureWithParBarrier::do_oop_work(p); }
void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }

class ParNewRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
public:
  ParNewRefProcTaskProxy(ProcessTask& task, ParNewGeneration& gen,
                         Generation& next_gen,
                         HeapWord* young_old_boundary,
                         ParScanThreadStateSet& state_set);

private:
  virtual void work(int i);

private:
  ParNewGeneration&      _gen;
  ProcessTask&           _task;
  Generation&            _next_gen;
  HeapWord*              _young_old_boundary;
  ParScanThreadStateSet& _state_set;
};

ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(
    ProcessTask& task, ParNewGeneration& gen,
    Generation& next_gen,
    HeapWord* young_old_boundary,
    ParScanThreadStateSet& state_set)
  : AbstractGangTask("ParNewGeneration parallel reference processing"),
    _gen(gen),
    _task(task),
    _next_gen(next_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
{
}

void ParNewRefProcTaskProxy::work(int i)
{
  ResourceMark rm;
  HandleMark hm;
  ParScanThreadState& par_scan_state = _state_set.thread_state(i);
  par_scan_state.set_young_old_boundary(_young_old_boundary);
  _task.work(i, par_scan_state.is_alive_closure(),
             par_scan_state.keep_alive_closure(),
             par_scan_state.evacuate_followers_closure());
}

class ParNewRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _task;

public:
  ParNewRefEnqueueTaskProxy(EnqueueTask& task)
    : AbstractGangTask("ParNewGeneration parallel reference enqueue"),
      _task(task)
  { }

  virtual void work(int i)
  {
    _task.work(i);
  }
};


void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "not a generational heap");
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(),
                                 _generation.reserved().end(), _state_set);
  workers->run_task(&rp_task);
  _state_set.reset(_generation.promotion_failed());
}

void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  ParNewRefEnqueueTaskProxy enq_task(task);
  workers->run_task(&enq_task);
}

void ParNewRefProcTaskExecutor::set_single_threaded_mode()
{
  _state_set.flush();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->set_par_threads(0);  // 0 ==> non-parallel.
  gch->save_marks();
}

ScanClosureWithParBarrier::
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
  ScanClosure(g, gc_barrier) {}

EvacuateFollowersClosureGeneral::
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
                                OopsInGenClosure* cur,
                                OopsInGenClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void EvacuateFollowersClosureGeneral::do_void() {
  do {
    // Beware: this call will lead to closure applications via virtual
    // calls.
    _gch->oop_since_save_marks_iterate(_level,
                                       _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}


bool ParNewGeneration::_avoid_promotion_undo = false;

void ParNewGeneration::adjust_desired_tenuring_threshold() {
  // Set the desired survivor size to half the real survivor space
  _tenuring_threshold =
    age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
}

// A Generation that does parallel young-gen collection.

void ParNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "not a CMS generational heap");
  AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
  WorkGang* workers = gch->workers();
  _next_gen = gch->next_gen(this);
  assert(_next_gen != NULL,
         "This must be the youngest gen, and not the only gen");
  assert(gch->n_gens() == 2,
         "Par collection currently only works with single older gen.");
  // Do we have to avoid promotion_undo?
  if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) {
    set_avoid_promotion_undo(true);
  }

  // If the next generation is too full to accommodate worst-case promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_will_fail();
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  if (UseAdaptiveSizePolicy) {
    set_survivor_overflow(false);
    size_policy->minor_collection_begin();
  }

  TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  SpecializationStats::clear();

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->save_marks();
  assert(workers != NULL, "Need parallel worker threads.");
  ParallelTaskTerminator _term(workers->total_workers(), task_queues());
  ParScanThreadStateSet thread_state_set(workers->total_workers(),
                                         *to(), *this, *_next_gen, *task_queues(),
                                         _overflow_stacks, desired_plab_sz(), _term);

  ParNewGenTask tsk(this, _next_gen, reserved().end(), &thread_state_set);
  int n_workers = workers->total_workers();
  gch->set_par_threads(n_workers);
  gch->rem_set()->prepare_for_younger_refs_iterate(true);
  // It turns out that even when we're using 1 thread, doing the work in a
  // separate thread causes wide variance in run times.
  // We can't help this in the multi-threaded case, but we special-case
  // n=1 here to get repeatable measurements of the 1-thread overhead of
  // the parallel code.
  if (n_workers > 1) {
    GenCollectedHeap::StrongRootsScope srs(gch);
    workers->run_task(&tsk);
  } else {
    GenCollectedHeap::StrongRootsScope srs(gch);
    tsk.work(0);
  }
  thread_state_set.reset(promotion_failed());

  // Process (weak) reference objects found during scavenge.
  ReferenceProcessor* rp = ref_processor();
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);
  KeepAliveClosure keep_alive(&scan_weak_ref);
  ScanClosure               scan_without_gc_barrier(this, false);
  ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
  set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
  EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
    &scan_without_gc_barrier, &scan_with_gc_barrier);
  rp->setup_policy(clear_all_soft_refs);
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &evacuate_followers, &task_executor);
  } else {
    thread_state_set.flush();
    gch->set_par_threads(0);  // 0 ==> non-parallel.
    gch->save_marks();
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &evacuate_followers, NULL);
  }
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a minor collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    size_policy->reset_gc_overhead_limit_count();

    assert(to()->is_empty(), "to space should be empty now");
  } else {
    assert(HandlePromotionFailure,
           "Should only be here if promotion failure handling is on");
    if (_promo_failure_scan_stack != NULL) {
      // Can be non-null because of reference processing.
      // Free stack with its elements.
      delete _promo_failure_scan_stack;
      _promo_failure_scan_stack = NULL;
    }
    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed)");
    }
    // All the spaces are in play for mark-sweep.
    swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_will_fail();
    // Inform the next generation that a promotion failure occurred.
    _next_gen->promotion_failure_occurred();

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());

  adjust_desired_tenuring_threshold();
  if (ResizePLAB) {
    plab_stats()->adjust_desired_plab_sz();
  }

  if (PrintGC && !PrintGCDetails) {
    gch->print_heap_change(gch_prev_used);
  }

  if (PrintGCDetails && ParallelGCVerbose) {
    TASKQUEUE_STATS_ONLY(thread_state_set.print_termination_stats());
    TASKQUEUE_STATS_ONLY(thread_state_set.print_taskqueue_stats());
  }

  if (UseAdaptiveSizePolicy) {
    size_policy->minor_collection_end(gch->gc_cause());
    size_policy->avg_survived()->sample(from()->used());
  }

  update_time_of_last_gc(os::javaTimeMillis());

  SpecializationStats::print();

  rp->set_enqueuing_is_done(true);
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    rp->enqueue_discovered_references(&task_executor);
  } else {
    rp->enqueue_discovered_references(NULL);
  }
  rp->verify_no_references_recorded();
}

static int sum;
void ParNewGeneration::waste_some_time() {
  for (int i = 0; i < 100; i++) {
    sum += i;
  }
}

static const oop ClaimedForwardPtr = oop(0x4);

// Because of concurrency, there are times where an object for which
// "is_forwarded()" is true contains an "interim" forwarding pointer
// value.  Such a value will soon be overwritten with a real value.
// This method requires "obj" to have a forwarding pointer, waits, if
// necessary, for a real one to be inserted, and returns it.

oop ParNewGeneration::real_forwardee(oop obj) {
  oop forward_ptr = obj->forwardee();
  if (forward_ptr != ClaimedForwardPtr) {
    return forward_ptr;
  } else {
    return real_forwardee_slow(obj);
  }
}

oop ParNewGeneration::real_forwardee_slow(oop obj) {
  // Spin-read if it is claimed but not yet written by another thread.
  oop forward_ptr = obj->forwardee();
  while (forward_ptr == ClaimedForwardPtr) {
    waste_some_time();
    assert(obj->is_forwarded(), "precondition");
    forward_ptr = obj->forwardee();
  }
  return forward_ptr;
}

#ifdef ASSERT
bool ParNewGeneration::is_legal_forward_ptr(oop p) {
  return
    (_avoid_promotion_undo && p == ClaimedForwardPtr)
    || Universe::heap()->is_in_reserved(p);
}
#endif

void ParNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if ((m != markOopDesc::prototype()) &&
      (!UseBiasedLocking || (m != markOopDesc::biased_locking_prototype()))) {
    MutexLocker ml(ParGCRareEvent_lock);
    DefNewGeneration::preserve_mark_if_necessary(obj, m);
  }
}

// Multiple GC threads may try to promote an object.  If the object
// is successfully promoted, a forwarding pointer will be installed in
// the object in the young generation.  This method claims the right
// to install the forwarding pointer before it copies the object,
// thus avoiding the need to undo the copy as in
// copy_to_survivor_space_with_undo.
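//
// A rough sketch of the claim protocol used below (descriptive only):
// a to-space copy is made first and then published with a CAS of the
// real forwardee via forward_to_atomic(new_obj); a tenured promotion
// instead first CASes the sentinel ClaimedForwardPtr into the mark word,
// and the winner copies the object and publishes the real forwarding
// pointer with forward_to(new_obj), while losers wait for that value in
// real_forwardee().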

oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo(
        ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
  // In the sequential version, this assert also says that the object is
  // not forwarded.  That might not be the case here.  It is the case that
  // the caller observed it to be not forwarded at some time in the past.
  assert(is_in_reserved(old), "shouldn't be scavenging this oop");

  // The sequential code read "old->age()" below.  That doesn't work here,
  // since the age is in the mark word, and that might be overwritten with
  // a forwarding pointer by a parallel thread.  So we must save the mark
  // word in a local and then analyze it.
  oopDesc dummyOld;
  dummyOld.set_mark(m);
  assert(!dummyOld.is_forwarded(),
         "should not be called with forwarding pointer mark word.");

  oop new_obj = NULL;
  oop forward_ptr;

  // Try allocating obj in to-space (unless too old)
  if (dummyOld.age() < tenuring_threshold()) {
    new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
    if (new_obj == NULL) {
      set_survivor_overflow(true);
    }
  }

  if (new_obj == NULL) {
    // Either to-space is full or we decided to promote;
    // try allocating obj tenured.

    // Attempt to install a null forwarding pointer (atomically),
    // to claim the right to install the real forwarding pointer.
    forward_ptr = old->forward_to_atomic(ClaimedForwardPtr);
    if (forward_ptr != NULL) {
      // someone else beat us to it.
      return real_forwardee(old);
    }

    new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
                                     old, m, sz);

    if (new_obj == NULL) {
      if (!HandlePromotionFailure) {
        // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
        // is incorrectly set. In any case, it's seriously wrong to be here!
        vm_exit_out_of_memory(sz*wordSize, "promotion");
      }
      // promotion failed, forward to self
      _promotion_failed = true;
      new_obj = old;

      preserve_mark_if_necessary(old, m);
      // Log the size of the maiden promotion failure
      par_scan_state->log_promotion_failure(sz);
    }

    old->forward_to(new_obj);
    forward_ptr = NULL;
  } else {
    // Is in to-space; do copying ourselves.
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
    forward_ptr = old->forward_to_atomic(new_obj);
    // Restore the mark word copied above.
    new_obj->set_mark(m);
    // Increment age if obj still in new generation
    new_obj->incr_age();
    par_scan_state->age_table()->add(new_obj, sz);
  }
  assert(new_obj != NULL, "just checking");

  if (forward_ptr == NULL) {
    oop obj_to_push = new_obj;
    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
      // Length field used as index of next element to be scanned.
      // Real length can be obtained from real_forwardee()
      arrayOop(old)->set_length(0);
      obj_to_push = old;
      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
             "push forwarded object");
    }
    // Push it on one of the queues of to-be-scanned objects.
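    // (If the normal push below fails, the object instead goes to the
    // overflow machinery: a thread-local overflow stack when
    // ParGCUseLocalOverflow is set, or the global overflow list
    // otherwise; see push_on_overflow_list() further down.)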
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
      // Add stats for overflow pushes.
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("queue overflow!\n");
      }
      push_on_overflow_list(old, par_scan_state);
      TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
    }

    return new_obj;
  }

  // Oops.  Someone beat us to it.  Undo the allocation.  Where did we
  // allocate it?
  if (is_in_reserved(new_obj)) {
    // Must be in to_space.
    assert(to()->is_in_reserved(new_obj), "Checking");
    if (forward_ptr == ClaimedForwardPtr) {
      // Wait to get the real forwarding pointer value.
      forward_ptr = real_forwardee(old);
    }
    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
  }

  return forward_ptr;
}


// Multiple GC threads may try to promote the same object.  If two
// or more GC threads copy the object, only one wins the race to install
// the forwarding pointer.  The other threads have to undo their copy.

oop ParNewGeneration::copy_to_survivor_space_with_undo(
        ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {

  // In the sequential version, this assert also says that the object is
  // not forwarded.  That might not be the case here.  It is the case that
  // the caller observed it to be not forwarded at some time in the past.
  assert(is_in_reserved(old), "shouldn't be scavenging this oop");

  // The sequential code read "old->age()" below.  That doesn't work here,
  // since the age is in the mark word, and that might be overwritten with
  // a forwarding pointer by a parallel thread.  So we must save the mark
  // word here, install it in a local oopDesc, and then analyze it.
  oopDesc dummyOld;
  dummyOld.set_mark(m);
  assert(!dummyOld.is_forwarded(),
         "should not be called with forwarding pointer mark word.");

  bool failed_to_promote = false;
  oop new_obj = NULL;
  oop forward_ptr;

  // Try allocating obj in to-space (unless too old)
  if (dummyOld.age() < tenuring_threshold()) {
    new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
    if (new_obj == NULL) {
      set_survivor_overflow(true);
    }
  }

  if (new_obj == NULL) {
    // Either to-space is full or we decided to promote;
    // try allocating obj tenured.
    new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
                                     old, m, sz);

    if (new_obj == NULL) {
      if (!HandlePromotionFailure) {
        // A failed promotion likely means the MaxLiveObjectEvacuationRatio
        // flag is incorrectly set. In any case, it's seriously wrong to be
        // here!
        vm_exit_out_of_memory(sz*wordSize, "promotion");
      }
      // promotion failed, forward to self
      forward_ptr = old->forward_to_atomic(old);
      new_obj = old;

      if (forward_ptr != NULL) {
        return forward_ptr;   // someone else succeeded
      }

      _promotion_failed = true;
      failed_to_promote = true;

      preserve_mark_if_necessary(old, m);
      // Log the size of the maiden promotion failure
      par_scan_state->log_promotion_failure(sz);
    }
  } else {
    // Is in to-space; do copying ourselves.
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
    // Restore the mark word copied above.
    new_obj->set_mark(m);
    // Increment age if new_obj still in new generation
    new_obj->incr_age();
    par_scan_state->age_table()->add(new_obj, sz);
  }
  assert(new_obj != NULL, "just checking");

  // Now attempt to install the forwarding pointer (atomically).
  // We have to copy the mark word before overwriting with forwarding
  // ptr, so we can restore it below in the copy.
  if (!failed_to_promote) {
    forward_ptr = old->forward_to_atomic(new_obj);
  }

  if (forward_ptr == NULL) {
    oop obj_to_push = new_obj;
    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
      // Length field used as index of next element to be scanned.
      // Real length can be obtained from real_forwardee()
      arrayOop(old)->set_length(0);
      obj_to_push = old;
      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
             "push forwarded object");
    }
    // Push it on one of the queues of to-be-scanned objects.
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
      // Add stats for overflow pushes.
      push_on_overflow_list(old, par_scan_state);
      TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
    }

    return new_obj;
  }

  // Oops.  Someone beat us to it.  Undo the allocation.  Where did we
  // allocate it?
  if (is_in_reserved(new_obj)) {
    // Must be in to_space.
    assert(to()->is_in_reserved(new_obj), "Checking");
    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
  } else {
    assert(!_avoid_promotion_undo, "Should not be here if avoiding.");
    _next_gen->par_promote_alloc_undo(par_scan_state->thread_num(),
                                      (HeapWord*)new_obj, sz);
  }

  return forward_ptr;
}

#ifndef PRODUCT
// It's OK to call this multi-threaded; the worst thing
// that can happen is that we'll get a bunch of closely
// spaced simulated overflows, but that's OK, in fact
// probably good as it would exercise the overflow code
// under contention.
bool ParNewGeneration::should_simulate_overflow() {
  if (_overflow_counter-- <= 0) { // just being defensive
    _overflow_counter = ParGCWorkQueueOverflowInterval;
    return true;
  } else {
    return false;
  }
}
#endif

// In case we are using compressed oops, we need to be careful.
// If the object being pushed is an object array, then its length
// field keeps track of the "grey boundary" at which the next
// incremental scan will be done (see ParGCArrayScanChunk).
// When using compressed oops, this length field is kept in the
// lower 32 bits of the erstwhile klass word and cannot be used
// for the overflow chaining pointer (OCP below).  As such the OCP
// would itself need to be compressed into the top 32-bits in this
// case.
// Unfortunately, see below, in the event that we have a
// promotion failure, the node to be pushed on the list can be
// outside of the Java heap, so the heap-based pointer compression
// would not work (we would have potential aliasing between C-heap
// and Java-heap pointers). For this reason, when using compressed
// oops, we simply use a worker-thread-local, non-shared overflow
// list in the form of a growable array, with a slightly different
// overflow stack draining strategy. If/when we start using fat
// stacks here, we can go back to using (fat) pointer chains
// (although some performance comparisons would be useful since
// single global lists have their own performance disadvantages
// as we were made painfully aware not long ago, see 6786503).
#define BUSY (oop(0x1aff1aff))
void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) {
  assert(is_in_reserved(from_space_obj), "Should be from this generation");
  if (ParGCUseLocalOverflow) {
    // In the case of compressed oops, we use a private, not-shared
    // overflow stack.
    par_scan_state->push_on_overflow_stack(from_space_obj);
  } else {
    assert(!UseCompressedOops, "Error");
    // If the object has been forwarded to itself, then we cannot
    // use the klass pointer for the linked list.  Instead we have
    // to allocate an oopDesc in the C-Heap and use that for the linked list.
    // XXX This is horribly inefficient when a promotion failure occurs
    // and should be fixed. XXX FIX ME !!!
#ifndef PRODUCT
    Atomic::inc_ptr(&_num_par_pushes);
    assert(_num_par_pushes > 0, "Tautology");
#endif
    if (from_space_obj->forwardee() == from_space_obj) {
      oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1);
      listhead->forward_to(from_space_obj);
      from_space_obj = listhead;
    }
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list;
    do {
      cur_overflow_list = observed_overflow_list;
      if (cur_overflow_list != BUSY) {
        from_space_obj->set_klass_to_list_ptr(cur_overflow_list);
      } else {
        from_space_obj->set_klass_to_list_ptr(NULL);
      }
      observed_overflow_list =
        (oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list);
    } while (cur_overflow_list != observed_overflow_list);
  }
}

bool ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) {
  bool res;

  if (ParGCUseLocalOverflow) {
    res = par_scan_state->take_from_overflow_stack();
  } else {
    assert(!UseCompressedOops, "Error");
    res = take_from_overflow_list_work(par_scan_state);
  }
  return res;
}


// *NOTE*: The overflow list manipulation code here and
// in CMSCollector:: are very similar in shape,
// except that in the CMS case we thread the objects
// directly into the list via their mark word, and do
// not need to deal with special cases below related
// to chunking of object arrays and promotion failure
// handling.
// CR 6797058 has been filed to attempt consolidation of
// the common code.
// Because of the common code, if you make any changes in
// the code below, please check the CMS version to see if
// similar changes might be needed.
// See CMSCollector::par_take_from_overflow_list() for
// more extensive documentation comments.
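//
// For readers new to this code, a brief sketch of the list encoding used
// below (all of it recoverable from the code itself): list nodes are
// chained through their klass words via set_klass_to_list_ptr(); a
// promotion-failed (self-forwarded) object cannot give up its klass
// word, so a dummy oopDesc is interposed in the C-heap whose forwardee()
// points at the real object and which is freed when popped; the sentinel
// BUSY marks a list that a thread has momentarily claimed wholesale.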
bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan_state) {
  ObjToScanQueue* work_q = par_scan_state->work_queue();
  // How many to take?
  size_t objsFromOverflow = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
                                 (size_t)ParGCDesiredObjsFromOverflowList);

  assert(par_scan_state->overflow_stack() == NULL, "Error");
  assert(!UseCompressedOops, "Error");
  if (_overflow_list == NULL) return false;

  // Otherwise, there was something there; try claiming the list.
  oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
  // Trim off a prefix of at most objsFromOverflow items
  Thread* tid = Thread::current();
  size_t spin_count = (size_t)ParallelGCThreads;
  size_t sleep_time_millis = MAX2((size_t)1, objsFromOverflow/100);
  for (size_t spin = 0; prefix == BUSY && spin < spin_count; spin++) {
    // someone grabbed it before we did ...
    // ... we spin for a short while...
    os::sleep(tid, sleep_time_millis, false);
    if (_overflow_list == NULL) {
      // nothing left to take
      return false;
    } else if (_overflow_list != BUSY) {
      // try and grab the prefix
      prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
    }
  }
  if (prefix == NULL || prefix == BUSY) {
    // Nothing to take or waited long enough
    if (prefix == NULL) {
      // Write back the NULL in case we overwrote it with BUSY above
      // and it is still the same value.
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
    return false;
  }
  assert(prefix != NULL && prefix != BUSY, "Error");
  size_t i = 1;
  oop cur = prefix;
  while (i < objsFromOverflow && cur->klass_or_null() != NULL) {
    i++; cur = oop(cur->klass());
  }

  // Reattach remaining (suffix) to overflow list
  if (cur->klass_or_null() == NULL) {
    // Write back the NULL in lieu of the BUSY we wrote
    // above, if it is still the same value.
    if (_overflow_list == BUSY) {
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
  } else {
    assert(cur->klass_or_null() != BUSY, "Error");
    oop suffix = oop(cur->klass());   // suffix will be put back on global list
    cur->set_klass_to_list_ptr(NULL); // break off suffix
    // It's possible that the list is still in the empty(busy) state
    // we left it in a short while ago; in that case we may be
    // able to place back the suffix.
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list = observed_overflow_list;
    bool attached = false;
    while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
      observed_overflow_list =
        (oop) Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
      if (cur_overflow_list == observed_overflow_list) {
        attached = true;
        break;
      } else cur_overflow_list = observed_overflow_list;
    }
    if (!attached) {
      // Too bad, someone else got in between; we'll need to do a splice.
      // Find the last item of suffix list
      oop last = suffix;
      while (last->klass_or_null() != NULL) {
        last = oop(last->klass());
      }
      // Atomically prepend suffix to current overflow list
      observed_overflow_list = _overflow_list;
      do {
        cur_overflow_list = observed_overflow_list;
        if (cur_overflow_list != BUSY) {
          // Do the splice ...
          last->set_klass_to_list_ptr(cur_overflow_list);
        } else { // cur_overflow_list == BUSY
          last->set_klass_to_list_ptr(NULL);
        }
        observed_overflow_list =
          (oop)Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
      } while (cur_overflow_list != observed_overflow_list);
    }
  }

  // Push objects on prefix list onto this thread's work queue
  assert(prefix != NULL && prefix != BUSY, "program logic");
  cur = prefix;
  ssize_t n = 0;
  while (cur != NULL) {
    oop obj_to_push = cur->forwardee();
    oop next = oop(cur->klass_or_null());
    cur->set_klass(obj_to_push->klass());
    // This may be an array object that is self-forwarded. In that case, the list pointer
    // space, cur, is not in the Java heap, but rather in the C-heap and should be freed.
    if (!is_in_reserved(cur)) {
      // This can become a scaling bottleneck when there is work queue overflow coincident
      // with promotion failure.
      oopDesc* f = cur;
      FREE_C_HEAP_ARRAY(oopDesc, f);
    } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = work_q->push(obj_to_push);
    assert(ok, "Should have succeeded");
    cur = next;
    n++;
  }
  TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n));
#ifndef PRODUCT
  assert(_num_par_pushes >= n, "Too many pops?");
  Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
#endif
  return true;
}
#undef BUSY

void ParNewGeneration::ref_processor_init()
{
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor = ReferenceProcessor::create_ref_processor(
        _reserved,                  // span
        refs_discovery_is_atomic(), // atomic_discovery
        refs_discovery_is_mt(),     // mt_discovery
        NULL,                       // is_alive_non_header
        ParallelGCThreads,
        ParallelRefProcEnabled);
  }
}

const char* ParNewGeneration::name() const {
  return "par new generation";
}