/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_parNewGeneration.cpp.incl"

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParScanThreadState::ParScanThreadState(Space* to_space_,
                                       ParNewGeneration* gen_,
                                       Generation* old_gen_,
                                       int thread_num_,
                                       ObjToScanQueueSet* work_queue_set_,
                                       Stack<oop>* overflow_stacks_,
                                       size_t desired_plab_sz_,
                                       ParallelTaskTerminator& term_) :
  _to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_),
  _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
  _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
  _ageTable(false), // false ==> not the global age table, no perf data.
  _to_space_alloc_buffer(desired_plab_sz_),
  _to_space_closure(gen_, this), _old_gen_closure(gen_, this),
  _to_space_root_closure(gen_, this), _old_gen_root_closure(gen_, this),
  _older_gen_closure(gen_, this),
  _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
                      &_to_space_root_closure, gen_, &_old_gen_root_closure,
                      work_queue_set_, &term_),
  _is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this),
  _keep_alive_closure(&_scan_weak_ref_closure),
  _promotion_failure_size(0),
  _strong_roots_time(0.0), _term_time(0.0)
{
#if TASKQUEUE_STATS
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
#endif // TASKQUEUE_STATS

  _survivor_chunk_array =
    (ChunkArray*) old_gen()->get_data_recorder(thread_num());
  _hash_seed = 17;  // Might want to take time-based random value.
  _start = os::elapsedTime();
  _old_gen_closure.set_generation(old_gen_);
  _old_gen_root_closure.set_generation(old_gen_);
}
#ifdef _MSC_VER
#pragma warning( pop )
#endif

void ParScanThreadState::record_survivor_plab(HeapWord* plab_start,
                                              size_t plab_word_size) {
  ChunkArray* sca = survivor_chunk_array();
  if (sca != NULL) {
    // A non-null SCA implies that we want the PLAB data recorded.
    sca->record_sample(plab_start, plab_word_size);
  }
}
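// Object arrays larger than ParGCArrayScanChunk are evacuated in chunks.
// The length field of the stale from-space copy is reused as a cursor
// recording how far scanning has progressed; the true length is always
// available in the to-space copy (see scan_partial_array_and_push_remainder).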
bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) const {
  return new_obj->is_objArray() &&
         arrayOop(new_obj)->length() > ParGCArrayScanChunk &&
         new_obj != old_obj;
}

void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
  assert(old->is_objArray(), "must be obj array");
  assert(old->is_forwarded(), "must be forwarded");
  assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
  assert(!old_gen()->is_in(old), "must be in young generation.");

  objArrayOop obj = objArrayOop(old->forwardee());
  // Process ParGCArrayScanChunk elements now
  // and push the remainder back onto queue
  int start = arrayOop(old)->length();
  int end = obj->length();
  int remainder = end - start;
  assert(start <= end, "just checking");
  if (remainder > 2 * ParGCArrayScanChunk) {
    // Test above combines last partial chunk with a full chunk
    end = start + ParGCArrayScanChunk;
    arrayOop(old)->set_length(end);
    // Push remainder.
    bool ok = work_queue()->push(old);
    assert(ok, "just popped, push must be okay");
  } else {
    // Restore length so that it can be used if there
    // is a promotion failure and forwarding pointers
    // must be removed.
    arrayOop(old)->set_length(end);
  }

  // Process our set of indices (include header in first chunk).
  // Should make sure end is even (aligned to HeapWord in case of compressed oops).
  if ((HeapWord *)obj < young_old_boundary()) {
    // object is in to_space
    obj->oop_iterate_range(&_to_space_closure, start, end);
  } else {
    // object is in old generation
    obj->oop_iterate_range(&_old_gen_closure, start, end);
  }
}

void ParScanThreadState::trim_queues(int max_size) {
  ObjToScanQueue* queue = work_queue();
  do {
    while (queue->size() > (juint)max_size) {
      oop obj_to_scan;
      if (queue->pop_local(obj_to_scan)) {
        if ((HeapWord *)obj_to_scan < young_old_boundary()) {
          if (obj_to_scan->is_objArray() &&
              obj_to_scan->is_forwarded() &&
              obj_to_scan->forwardee() != obj_to_scan) {
            scan_partial_array_and_push_remainder(obj_to_scan);
          } else {
            // object is in to_space
            obj_to_scan->oop_iterate(&_to_space_closure);
          }
        } else {
          // object is in old generation
          obj_to_scan->oop_iterate(&_old_gen_closure);
        }
      }
    }
    // For the case of compressed oops, we have a private, non-shared
    // overflow stack, so we eagerly drain it so as to more evenly
    // distribute load early.  Note: this may be good to do in
    // general rather than delay for the final stealing phase.
    // If applicable, we'll transfer a set of objects over to our
    // work queue, allowing them to be stolen and draining our
    // private overflow stack.
  } while (ParGCTrimOverflow && young_gen()->take_from_overflow_list(this));
}
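// Move work from this thread's private overflow stack to its work queue.
// The MIN3 below is a heuristic: take at most a quarter of the queue's
// free space, at most ParGCDesiredObjsFromOverflowList objects, and never
// more than the stack actually holds.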
bool ParScanThreadState::take_from_overflow_stack() {
  assert(ParGCUseLocalOverflow, "Else should not call");
  assert(young_gen()->overflow_list() == NULL, "Error");
  ObjToScanQueue* queue = work_queue();
  Stack<oop>* const of_stack = overflow_stack();
  const size_t num_overflow_elems = of_stack->size();
  const size_t space_available = queue->max_elems() - queue->size();
  const size_t num_take_elems = MIN3(space_available / 4,
                                     ParGCDesiredObjsFromOverflowList,
                                     num_overflow_elems);
  // Transfer the most recent num_take_elems from the overflow
  // stack to our work queue.
  for (size_t i = 0; i != num_take_elems; i++) {
    oop cur = of_stack->pop();
    oop obj_to_push = cur->forwardee();
    assert(Universe::heap()->is_in_reserved(cur), "Should be in heap");
    assert(!old_gen()->is_in_reserved(cur), "Should be in young gen");
    assert(Universe::heap()->is_in_reserved(obj_to_push), "Should be in heap");
    if (should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = queue->push(obj_to_push);
    assert(ok, "Should have succeeded");
  }
  assert(young_gen()->overflow_list() == NULL, "Error");
  return num_take_elems > 0;  // was something transferred?
}

void ParScanThreadState::push_on_overflow_stack(oop p) {
  assert(ParGCUseLocalOverflow, "Else should not call");
  overflow_stack()->push(p);
  assert(young_gen()->overflow_list() == NULL, "Error");
}

HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {

  // The inline PLAB allocation has failed; if the object is small
  // enough, try to reallocate the buffer.
  HeapWord* obj = NULL;
  if (!_to_space_full) {
    ParGCAllocBuffer* const plab = to_space_alloc_buffer();
    Space* const sp = to_space();
    if (word_sz * 100 <
        ParallelGCBufferWastePct * plab->word_sz()) {
      // Is small enough; abandon this buffer and start a new one.
      plab->retire(false, false);
      size_t buf_size = plab->word_sz();
      HeapWord* buf_space = sp->par_allocate(buf_size);
      if (buf_space == NULL) {
        const size_t min_bytes =
          ParGCAllocBuffer::min_size() << LogHeapWordSize;
        size_t free_bytes = sp->free();
        while (buf_space == NULL && free_bytes >= min_bytes) {
          buf_size = free_bytes >> LogHeapWordSize;
          assert(buf_size == (size_t)align_object_size(buf_size),
                 "Invariant");
          buf_space = sp->par_allocate(buf_size);
          free_bytes = sp->free();
        }
      }
      if (buf_space != NULL) {
        plab->set_word_size(buf_size);
        plab->set_buf(buf_space);
        record_survivor_plab(buf_space, buf_size);
        obj = plab->allocate(word_sz);
        // Note that we cannot compare buf_size < word_sz below
        // because of AlignmentReserve (see ParGCAllocBuffer::allocate()).
        assert(obj != NULL || plab->words_remaining() < word_sz,
               "Else should have been able to allocate");
        // It's conceivable that we may be able to use the
        // buffer we just grabbed for subsequent small requests
        // even if not for this one.
      } else {
        // We're used up.
        _to_space_full = true;
      }
    } else {
      // Too large; allocate the object individually.
      obj = sp->par_allocate(word_sz);
    }
  }
  return obj;
}
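// Undo an allocation whose copy lost the race to install the forwarding
// pointer.  If the words came from the current PLAB they are simply given
// back; otherwise the hole is plugged with a filler object so that
// to-space stays parsable.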
void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj,
                                                size_t word_sz) {
  // Is the alloc in the current alloc buffer?
  if (to_space_alloc_buffer()->contains(obj)) {
    assert(to_space_alloc_buffer()->contains(obj + word_sz - 1),
           "Should contain whole object.");
    to_space_alloc_buffer()->undo_allocation(obj, word_sz);
  } else {
    CollectedHeap::fill_with_object(obj, word_sz);
  }
}

void ParScanThreadState::print_and_clear_promotion_failure_size() {
  if (_promotion_failure_size != 0) {
    if (PrintPromotionFailure) {
      gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ",
                          _thread_num, _promotion_failure_size);
    }
    _promotion_failure_size = 0;
  }
}

class ParScanThreadStateSet: private ResourceArray {
public:
  // Initializes states for the specified number of threads.
  ParScanThreadStateSet(int num_threads,
                        Space& to_space,
                        ParNewGeneration& gen,
                        Generation& old_gen,
                        ObjToScanQueueSet& queue_set,
                        Stack<oop>* overflow_stacks_,
                        size_t desired_plab_sz,
                        ParallelTaskTerminator& term);

  ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }

  inline ParScanThreadState& thread_state(int i);

  void reset(bool promotion_failed);
  void flush();

#if TASKQUEUE_STATS
  static void
    print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_termination_stats(outputStream* const st = gclog_or_tty);
  static void
    print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_taskqueue_stats(outputStream* const st = gclog_or_tty);
  void reset_stats();
#endif // TASKQUEUE_STATS

private:
  ParallelTaskTerminator& _term;
  ParNewGeneration&       _gen;
  Generation&             _next_gen;
};

ParScanThreadStateSet::ParScanThreadStateSet(
  int num_threads, Space& to_space, ParNewGeneration& gen,
  Generation& old_gen, ObjToScanQueueSet& queue_set,
  Stack<oop>* overflow_stacks,
  size_t desired_plab_sz, ParallelTaskTerminator& term)
  : ResourceArray(sizeof(ParScanThreadState), num_threads),
    _gen(gen), _next_gen(old_gen), _term(term)
{
  assert(num_threads > 0, "sanity check!");
  assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
         "overflow_stack allocation mismatch");
  // Initialize states.
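  // The states are constructed in place (placement new) in the raw,
  // resource-allocated storage provided by the ResourceArray base class.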
  for (int i = 0; i < num_threads; ++i) {
    new ((ParScanThreadState*)_data + i)
      ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
                         overflow_stacks, desired_plab_sz, term);
  }
}

inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
{
  assert(i >= 0 && i < length(), "sanity check!");
  return ((ParScanThreadState*)_data)[i];
}

void ParScanThreadStateSet::reset(bool promotion_failed)
{
  _term.reset_for_reuse();
  if (promotion_failed) {
    for (int i = 0; i < length(); ++i) {
      thread_state(i).print_and_clear_promotion_failure_size();
    }
  }
}

#if TASKQUEUE_STATS
void
ParScanThreadState::reset_stats()
{
  taskqueue_stats().reset();
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
}

void ParScanThreadStateSet::reset_stats()
{
  for (int i = 0; i < length(); ++i) {
    thread_state(i).reset_stats();
  }
}

void
ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st)
{
  st->print_raw_cr("GC Termination Stats");
  st->print_raw_cr("     elapsed  --strong roots-- "
                   "-------termination-------");
  st->print_raw_cr("thr     ms        ms       %   "
                   "    ms       %   attempts");
  st->print_raw_cr("--- --------- --------- ------ "
                   "--------- ------ --------");
}

void ParScanThreadStateSet::print_termination_stats(outputStream* const st)
{
  print_termination_stats_hdr(st);

  for (int i = 0; i < length(); ++i) {
    const ParScanThreadState & pss = thread_state(i);
    const double elapsed_ms = pss.elapsed_time() * 1000.0;
    const double s_roots_ms = pss.strong_roots_time() * 1000.0;
    const double term_ms = pss.term_time() * 1000.0;
    st->print_cr("%3d %9.2f %9.2f %6.2f "
                 "%9.2f %6.2f " SIZE_FORMAT_W(8),
                 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
                 term_ms, term_ms * 100 / elapsed_ms, pss.term_attempts());
  }
}

// Print stats related to work queue activity.
void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st)
{
  st->print_raw_cr("GC Task Stats");
  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
}

void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st)
{
  print_taskqueue_stats_hdr(st);

  TaskQueueStats totals;
  for (int i = 0; i < length(); ++i) {
    const ParScanThreadState & pss = thread_state(i);
    const TaskQueueStats & stats = pss.taskqueue_stats();
    st->print("%3d ", i); stats.print(st); st->cr();
    totals += stats;

    if (pss.overflow_refills() > 0) {
      st->print_cr("    " SIZE_FORMAT_W(10) " overflow refills    "
                   SIZE_FORMAT_W(10) " overflow objects",
                   pss.overflow_refills(), pss.overflow_refill_objs());
    }
  }
  st->print("tot "); totals.print(st); st->cr();

  DEBUG_ONLY(totals.verify());
}
#endif // TASKQUEUE_STATS

void ParScanThreadStateSet::flush()
{
  // Work in this loop should be kept as lightweight as
  // possible since this might otherwise become a bottleneck
  // to scaling.  Should we add heavy-weight work into this
  // loop, consider parallelizing the loop into the worker threads.
  for (int i = 0; i < length(); ++i) {
    ParScanThreadState& par_scan_state = thread_state(i);

    // Flush stats related to To-space PLAB activity and
    // retire the last buffer.
    par_scan_state.to_space_alloc_buffer()->
      flush_stats_and_retire(_gen.plab_stats(),
                             false /* !retain */);

    // Every thread has its own age table.  We need to merge
    // them all into one.
    ageTable *local_table = par_scan_state.age_table();
    _gen.age_table()->merge(local_table);

    // Inform old gen that we're done.
    _next_gen.par_promote_alloc_done(i);
    _next_gen.par_oop_since_save_marks_iterate_done(i);
  }

  if (UseConcMarkSweepGC && ParallelGCThreads > 0) {
    // We need to call this even when ResizeOldPLAB is disabled
    // so as to avoid breaking some asserts.  While we may be able
    // to avoid this by reorganizing the code a bit, I am loath
    // to do that unless we find cases where ergo leads to bad
    // performance.
    CFLS_LAB::compute_desired_plab_size();
  }
}

ParScanClosure::ParScanClosure(ParNewGeneration* g,
                               ParScanThreadState* par_scan_state) :
  OopsInGenClosure(g), _par_scan_state(par_scan_state), _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ParScanWithBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, false); }
void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }

void ParScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, false); }
void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }

void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, true); }
void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }

void ParRootScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, true); }
void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }

ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
                                             ParScanThreadState* par_scan_state)
  : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
{}

void ParScanWeakRefClosure::do_oop(oop* p)       { ParScanWeakRefClosure::do_oop_work(p); }
void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }

#ifdef WIN32
#pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */
#endif

ParEvacuateFollowersClosure::ParEvacuateFollowersClosure(
    ParScanThreadState* par_scan_state_,
    ParScanWithoutBarrierClosure* to_space_closure_,
    ParScanWithBarrierClosure* old_gen_closure_,
    ParRootScanWithoutBarrierClosure* to_space_root_closure_,
    ParNewGeneration* par_gen_,
    ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_,
    ObjToScanQueueSet* task_queues_,
    ParallelTaskTerminator* terminator_) :

    _par_scan_state(par_scan_state_),
    _to_space_closure(to_space_closure_),
    _old_gen_closure(old_gen_closure_),
    _to_space_root_closure(to_space_root_closure_),
    _old_gen_root_closure(old_gen_root_closure_),
    _par_gen(par_gen_),
    _task_queues(task_queues_),
    _terminator(terminator_)
{}
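// One worker's evacuation loop: repeatedly drain the local work queue,
// then try to steal from other workers' queues, then refill from the
// global overflow list, and finally offer termination once no work can
// be found anywhere.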
void ParEvacuateFollowersClosure::do_void() {
  ObjToScanQueue* work_q = par_scan_state()->work_queue();

  while (true) {

    // Scan to-space and old-gen objs until we run out of both.
    oop obj_to_scan;
    par_scan_state()->trim_queues(0);

    // We have no local work, attempt to steal from other threads.

    // Attempt to steal work from promoted.
    if (task_queues()->steal(par_scan_state()->thread_num(),
                             par_scan_state()->hash_seed(),
                             obj_to_scan)) {
      bool res = work_q->push(obj_to_scan);
      assert(res, "Empty queue should have room for a push.");

      // If successful, goto Start.
      continue;

      // Try global overflow list.
    } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
      continue;
    }

    // Otherwise, offer termination.
    par_scan_state()->start_term_time();
    if (terminator()->offer_termination()) break;
    par_scan_state()->end_term_time();
  }
  assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
         "Broken overflow list?");
  // Finish the last termination pause.
  par_scan_state()->end_term_time();
}

ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* next_gen,
                             HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) :
    AbstractGangTask("ParNewGeneration collection"),
    _gen(gen), _next_gen(next_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
{}

void ParNewGenTask::work(int i) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  // Since this is being done in a separate thread, need new resource
  // and handle marks.
  ResourceMark rm;
  HandleMark hm;
  // We would need multiple old-gen queues otherwise.
  assert(gch->n_gens() == 2, "Par young collection currently only works with one older gen.");

  Generation* old_gen = gch->next_gen(_gen);

  ParScanThreadState& par_scan_state = _state_set->thread_state(i);
  par_scan_state.set_young_old_boundary(_young_old_boundary);

  par_scan_state.start_strong_roots();
  gch->gen_process_strong_roots(_gen->level(),
                                true,  // Process younger gens, if any,
                                       // as strong roots.
                                false, // no scope; this is parallel code
                                false, // not collecting perm generation.
                                SharedHeap::SO_AllClasses,
                                &par_scan_state.to_space_root_closure(),
                                true,  // walk *all* scavengable nmethods
                                &par_scan_state.older_gen_closure());
  par_scan_state.end_strong_roots();

  // "Evacuate followers".
  par_scan_state.evacuate_followers_closure().do_void();
}
#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParNewGeneration::
ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
  : DefNewGeneration(rs, initial_byte_size, level, "PCopy"),
  _overflow_list(NULL),
  _is_alive_closure(this),
  _plab_stats(YoungPLABSize, PLABWeight)
{
  NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
  NOT_PRODUCT(_num_par_pushes = 0;)
  _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
  guarantee(_task_queues != NULL, "task_queues allocation failure.");

  for (uint i1 = 0; i1 < ParallelGCThreads; i1++) {
    ObjToScanQueue *q = new ObjToScanQueue();
    guarantee(q != NULL, "work_queue Allocation failure.");
    _task_queues->register_queue(i1, q);
  }

  for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
    _task_queues->queue(i2)->initialize();

  _overflow_stacks = NULL;
  if (ParGCUseLocalOverflow) {
    _overflow_stacks = NEW_C_HEAP_ARRAY(Stack<oop>, ParallelGCThreads);
    for (size_t i = 0; i < ParallelGCThreads; ++i) {
      new (_overflow_stacks + i) Stack<oop>();
    }
  }

  if (UsePerfData) {
    EXCEPTION_MARK;
    ResourceMark rm;

    const char* cname =
      PerfDataManager::counter_name(_gen_counters->name_space(), "threads");
    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None,
                                     ParallelGCThreads, CHECK);
  }
}
#ifdef _MSC_VER
#pragma warning( pop )
#endif

// ParNewGeneration::
ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}

template <class T>
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _par_cl->do_oop_nv(p);

  if (Universe::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(oop* p)       { ParKeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(narrowOop* p) { ParKeepAliveClosure::do_oop_work(p); }

// ParNewGeneration::
KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {}

template <class T>
void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _cl->do_oop_nv(p);

  if (Universe::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/KeepAliveClosure::do_oop(oop* p)       { KeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); }
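// Scan a field: if the referent is still in young space, copy it to a
// survivor space (or use its existing forwardee), then, when a GC barrier
// is requested, dirty the card through the parallel-safe remembered-set
// interface if the field may still point into the young generation.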
template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if ((HeapWord*)obj < _boundary) {
      assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
      oop new_obj = obj->is_forwarded()
                      ? obj->forwardee()
                      : _g->DefNewGeneration::copy_to_survivor_space(obj);
      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
    }
    if (_gc_barrier) {
      // If p points to a younger generation, mark the card.
      if ((HeapWord*)obj < _gen_boundary) {
        _rs->write_ref_field_gc_par(p, obj);
      }
    }
  }
}

void ScanClosureWithParBarrier::do_oop(oop* p)       { ScanClosureWithParBarrier::do_oop_work(p); }
void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }
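// Gang-task proxies for parallel reference processing: each worker runs
// its share of the underlying ProcessTask/EnqueueTask using the
// per-thread closures kept in the ParScanThreadStateSet.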
class ParNewRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
public:
  ParNewRefProcTaskProxy(ProcessTask& task, ParNewGeneration& gen,
                         Generation& next_gen,
                         HeapWord* young_old_boundary,
                         ParScanThreadStateSet& state_set);

private:
  virtual void work(int i);

private:
  ParNewGeneration&      _gen;
  ProcessTask&           _task;
  Generation&            _next_gen;
  HeapWord*              _young_old_boundary;
  ParScanThreadStateSet& _state_set;
};

ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(
    ProcessTask& task, ParNewGeneration& gen,
    Generation& next_gen,
    HeapWord* young_old_boundary,
    ParScanThreadStateSet& state_set)
  : AbstractGangTask("ParNewGeneration parallel reference processing"),
    _gen(gen),
    _task(task),
    _next_gen(next_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
{
}

void ParNewRefProcTaskProxy::work(int i)
{
  ResourceMark rm;
  HandleMark hm;
  ParScanThreadState& par_scan_state = _state_set.thread_state(i);
  par_scan_state.set_young_old_boundary(_young_old_boundary);
  _task.work(i, par_scan_state.is_alive_closure(),
             par_scan_state.keep_alive_closure(),
             par_scan_state.evacuate_followers_closure());
}

class ParNewRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _task;

public:
  ParNewRefEnqueueTaskProxy(EnqueueTask& task)
    : AbstractGangTask("ParNewGeneration parallel reference enqueue"),
      _task(task)
  { }

  virtual void work(int i)
  {
    _task.work(i);
  }
};

void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "not a generational heap");
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(),
                                 _generation.reserved().end(), _state_set);
  workers->run_task(&rp_task);
  _state_set.reset(_generation.promotion_failed());
}

void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  ParNewRefEnqueueTaskProxy enq_task(task);
  workers->run_task(&enq_task);
}

void ParNewRefProcTaskExecutor::set_single_threaded_mode()
{
  _state_set.flush();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->set_par_threads(0);  // 0 ==> non-parallel.
  gch->save_marks();
}

ScanClosureWithParBarrier::
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
  ScanClosure(g, gc_barrier) {}

EvacuateFollowersClosureGeneral::
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
                                OopsInGenClosure* cur,
                                OopsInGenClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void EvacuateFollowersClosureGeneral::do_void() {
  do {
    // Beware: this call will lead to closure applications via virtual
    // calls.
    _gch->oop_since_save_marks_iterate(_level,
                                       _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}

bool ParNewGeneration::_avoid_promotion_undo = false;

void ParNewGeneration::adjust_desired_tenuring_threshold() {
  // Set the desired survivor size to half the real survivor space
  _tenuring_threshold =
    age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
}
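// Outline of ParNewGeneration::collect() below: verify the collection
// attempt is safe, set up per-thread scan states and a terminator, scan
// strong roots and evacuate followers in parallel, process discovered
// (weak) references, then either swap the survivor spaces on success or
// remove forwarding pointers on promotion failure.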
// A Generation that does parallel young-gen collection.

void ParNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "not a CMS generational heap");
  AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
  WorkGang* workers = gch->workers();
  _next_gen = gch->next_gen(this);
  assert(_next_gen != NULL,
         "This must be the youngest gen, and not the only gen");
  assert(gch->n_gens() == 2,
         "Par collection currently only works with single older gen.");
  // Do we have to avoid promotion_undo?
  if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) {
    set_avoid_promotion_undo(true);
  }

  // If the next generation is too full to accommodate worst-case promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_will_fail();
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  if (UseAdaptiveSizePolicy) {
    set_survivor_overflow(false);
    size_policy->minor_collection_begin();
  }

  TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  SpecializationStats::clear();

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->save_marks();
  assert(workers != NULL, "Need parallel worker threads.");
  ParallelTaskTerminator _term(workers->total_workers(), task_queues());
  ParScanThreadStateSet thread_state_set(workers->total_workers(),
                                         *to(), *this, *_next_gen, *task_queues(),
                                         _overflow_stacks, desired_plab_sz(), _term);

  ParNewGenTask tsk(this, _next_gen, reserved().end(), &thread_state_set);
  int n_workers = workers->total_workers();
  gch->set_par_threads(n_workers);
  gch->rem_set()->prepare_for_younger_refs_iterate(true);
  // It turns out that even when we're using 1 thread, doing the work in a
  // separate thread causes wide variance in run times.  We can't help this
  // in the multi-threaded case, but we special-case n=1 here to get
  // repeatable measurements of the 1-thread overhead of the parallel code.
  if (n_workers > 1) {
    GenCollectedHeap::StrongRootsScope srs(gch);
    workers->run_task(&tsk);
  } else {
    GenCollectedHeap::StrongRootsScope srs(gch);
    tsk.work(0);
  }
  thread_state_set.reset(promotion_failed());

  // Process (weak) reference objects found during scavenge.
  ReferenceProcessor* rp = ref_processor();
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);
  KeepAliveClosure keep_alive(&scan_weak_ref);
  ScanClosure scan_without_gc_barrier(this, false);
  ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
  set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
  EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
    &scan_without_gc_barrier, &scan_with_gc_barrier);
  rp->setup_policy(clear_all_soft_refs);
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &evacuate_followers, &task_executor);
  } else {
    thread_state_set.flush();
    gch->set_par_threads(0);  // 0 ==> non-parallel.
    gch->save_marks();
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &evacuate_followers, NULL);
  }
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piecemeal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a minor collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    size_policy->reset_gc_overhead_limit_count();

    assert(to()->is_empty(), "to space should be empty now");
  } else {
    assert(HandlePromotionFailure,
           "Should only be here if promotion failure handling is on");
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true);  // Clear cached segments.

    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed)");
    }
    // All the spaces are in play for mark-sweep.
    swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_will_fail();
    // Inform the next generation that a promotion failure occurred.
    _next_gen->promotion_failure_occurred();

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  // Set new iteration safe limit for the survivor spaces.
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());

  adjust_desired_tenuring_threshold();
  if (ResizePLAB) {
    plab_stats()->adjust_desired_plab_sz();
  }

  if (PrintGC && !PrintGCDetails) {
    gch->print_heap_change(gch_prev_used);
  }

  if (PrintGCDetails && ParallelGCVerbose) {
    TASKQUEUE_STATS_ONLY(thread_state_set.print_termination_stats());
    TASKQUEUE_STATS_ONLY(thread_state_set.print_taskqueue_stats());
  }

  if (UseAdaptiveSizePolicy) {
    size_policy->minor_collection_end(gch->gc_cause());
    size_policy->avg_survived()->sample(from()->used());
  }

  update_time_of_last_gc(os::javaTimeMillis());

  SpecializationStats::print();

  rp->set_enqueuing_is_done(true);
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    rp->enqueue_discovered_references(&task_executor);
  } else {
    rp->enqueue_discovered_references(NULL);
  }
  rp->verify_no_references_recorded();
}

static int sum;
void ParNewGeneration::waste_some_time() {
  for (int i = 0; i < 100; i++) {
    sum += i;
  }
}

static const oop ClaimedForwardPtr = oop(0x4);

// Because of concurrency, there are times when an object for which
// "is_forwarded()" is true contains an "interim" forwarding pointer
// value.  Such a value will soon be overwritten with a real value.
// This method requires "obj" to have a forwarding pointer and waits,
// if necessary, for a real one to be inserted, then returns it.

oop ParNewGeneration::real_forwardee(oop obj) {
  oop forward_ptr = obj->forwardee();
  if (forward_ptr != ClaimedForwardPtr) {
    return forward_ptr;
  } else {
    return real_forwardee_slow(obj);
  }
}

oop ParNewGeneration::real_forwardee_slow(oop obj) {
  // Spin-read if it is claimed but not yet written by another thread.
  oop forward_ptr = obj->forwardee();
  while (forward_ptr == ClaimedForwardPtr) {
    waste_some_time();
    assert(obj->is_forwarded(), "precondition");
    forward_ptr = obj->forwardee();
  }
  return forward_ptr;
}

#ifdef ASSERT
bool ParNewGeneration::is_legal_forward_ptr(oop p) {
  return
    (_avoid_promotion_undo && p == ClaimedForwardPtr)
    || Universe::heap()->is_in_reserved(p);
}
#endif

void ParNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if ((m != markOopDesc::prototype()) &&
      (!UseBiasedLocking || (m != markOopDesc::biased_locking_prototype()))) {
    MutexLocker ml(ParGCRareEvent_lock);
    DefNewGeneration::preserve_mark_if_necessary(obj, m);
  }
}

// Multiple GC threads may try to promote an object.  If the object
// is successfully promoted, a forwarding pointer will be installed in
// the object in the young generation.  This method claims the right
// to install the forwarding pointer before it copies the object,
// thus avoiding the need to undo the copy as in
// copy_to_survivor_space_with_undo.
oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo(
        ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
  // In the sequential version, this assert also says that the object is
  // not forwarded.  That might not be the case here.  It is the case that
  // the caller observed it to be not forwarded at some time in the past.
  assert(is_in_reserved(old), "shouldn't be scavenging this oop");

  // The sequential code read "old->age()" below.  That doesn't work here,
  // since the age is in the mark word, and that might be overwritten with
  // a forwarding pointer by a parallel thread.  So we must save the mark
  // word in a local and then analyze it.
  oopDesc dummyOld;
  dummyOld.set_mark(m);
  assert(!dummyOld.is_forwarded(),
         "should not be called with forwarding pointer mark word.");

  oop new_obj = NULL;
  oop forward_ptr;

  // Try allocating obj in to-space (unless too old)
  if (dummyOld.age() < tenuring_threshold()) {
    new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
    if (new_obj == NULL) {
      set_survivor_overflow(true);
    }
  }

  if (new_obj == NULL) {
    // Either to-space is full or we decided to promote;
    // try allocating obj tenured.

    // Attempt to install a null forwarding pointer (atomically),
    // to claim the right to install the real forwarding pointer.
    forward_ptr = old->forward_to_atomic(ClaimedForwardPtr);
    if (forward_ptr != NULL) {
      // someone else beat us to it.
      return real_forwardee(old);
    }

    new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
                                     old, m, sz);

    if (new_obj == NULL) {
      if (!HandlePromotionFailure) {
        // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
        // is incorrectly set.  In any case, it's seriously wrong to be here!
        vm_exit_out_of_memory(sz*wordSize, "promotion");
      }
      // Promotion failed; forward to self.
      _promotion_failed = true;
      new_obj = old;

      preserve_mark_if_necessary(old, m);
      // Log the size of the maiden promotion failure
      par_scan_state->log_promotion_failure(sz);
    }

    old->forward_to(new_obj);
    forward_ptr = NULL;
  } else {
    // Is in to-space; do copying ourselves.
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
    forward_ptr = old->forward_to_atomic(new_obj);
    // Restore the mark word copied above.
    new_obj->set_mark(m);
    // Increment age if obj still in new generation
    new_obj->incr_age();
    par_scan_state->age_table()->add(new_obj, sz);
  }
  assert(new_obj != NULL, "just checking");

  if (forward_ptr == NULL) {
    oop obj_to_push = new_obj;
    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
      // Length field used as index of next element to be scanned.
      // Real length can be obtained from real_forwardee()
      arrayOop(old)->set_length(0);
      obj_to_push = old;
      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
             "push forwarded object");
    }
    // Push it on one of the queues of to-be-scanned objects.
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
      // Add stats for overflow pushes.
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("queue overflow!\n");
      }
      push_on_overflow_list(old, par_scan_state);
      TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
    }

    return new_obj;
  }

  // Oops.  Someone beat us to it.  Undo the allocation.  Where did we
  // allocate it?
  if (is_in_reserved(new_obj)) {
    // Must be in to_space.
    assert(to()->is_in_reserved(new_obj), "Checking");
    if (forward_ptr == ClaimedForwardPtr) {
      // Wait to get the real forwarding pointer value.
      forward_ptr = real_forwardee(old);
    }
    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
  }

  return forward_ptr;
}

// Multiple GC threads may try to promote the same object.  If two
// or more GC threads copy the object, only one wins the race to install
// the forwarding pointer.  The other threads have to undo their copy.

oop ParNewGeneration::copy_to_survivor_space_with_undo(
        ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {

  // In the sequential version, this assert also says that the object is
  // not forwarded.  That might not be the case here.  It is the case that
  // the caller observed it to be not forwarded at some time in the past.
  assert(is_in_reserved(old), "shouldn't be scavenging this oop");

  // The sequential code read "old->age()" below.  That doesn't work here,
  // since the age is in the mark word, and that might be overwritten with
  // a forwarding pointer by a parallel thread.  So we must save the mark
  // word here, install it in a local oopDesc, and then analyze it.
  oopDesc dummyOld;
  dummyOld.set_mark(m);
  assert(!dummyOld.is_forwarded(),
         "should not be called with forwarding pointer mark word.");

  bool failed_to_promote = false;
  oop new_obj = NULL;
  oop forward_ptr;

  // Try allocating obj in to-space (unless too old)
  if (dummyOld.age() < tenuring_threshold()) {
    new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
    if (new_obj == NULL) {
      set_survivor_overflow(true);
    }
  }

  if (new_obj == NULL) {
    // Either to-space is full or we decided to promote;
    // try allocating obj tenured.
    new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
                                     old, m, sz);

    if (new_obj == NULL) {
      if (!HandlePromotionFailure) {
        // A failed promotion likely means the MaxLiveObjectEvacuationRatio
        // flag is incorrectly set.  In any case, it's seriously wrong to be
        // here!
        vm_exit_out_of_memory(sz*wordSize, "promotion");
      }
      // Promotion failed; forward to self.
      forward_ptr = old->forward_to_atomic(old);
      new_obj = old;

      if (forward_ptr != NULL) {
        return forward_ptr;   // someone else succeeded
      }

      _promotion_failed = true;
      failed_to_promote = true;

      preserve_mark_if_necessary(old, m);
      // Log the size of the maiden promotion failure
      par_scan_state->log_promotion_failure(sz);
    }
  } else {
    // Is in to-space; do copying ourselves.
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
    // Restore the mark word copied above.
    new_obj->set_mark(m);
    // Increment age if new_obj still in new generation
    new_obj->incr_age();
    par_scan_state->age_table()->add(new_obj, sz);
  }
  assert(new_obj != NULL, "just checking");

  // Now attempt to install the forwarding pointer (atomically).
  // We have to copy the mark word before overwriting with forwarding
  // ptr, so we can restore it below in the copy.
  if (!failed_to_promote) {
    forward_ptr = old->forward_to_atomic(new_obj);
  }

  if (forward_ptr == NULL) {
    oop obj_to_push = new_obj;
    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
      // Length field used as index of next element to be scanned.
      // Real length can be obtained from real_forwardee()
      arrayOop(old)->set_length(0);
      obj_to_push = old;
      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
             "push forwarded object");
    }
    // Push it on one of the queues of to-be-scanned objects.
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
      // Add stats for overflow pushes.
      push_on_overflow_list(old, par_scan_state);
      TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
    }

    return new_obj;
  }

  // Oops.  Someone beat us to it.  Undo the allocation.  Where did we
  // allocate it?
  if (is_in_reserved(new_obj)) {
    // Must be in to_space.
    assert(to()->is_in_reserved(new_obj), "Checking");
    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
  } else {
    assert(!_avoid_promotion_undo, "Should not be here if avoiding.");
    _next_gen->par_promote_alloc_undo(par_scan_state->thread_num(),
                                      (HeapWord*)new_obj, sz);
  }

  return forward_ptr;
}

#ifndef PRODUCT
// It's OK to call this multi-threaded; the worst thing
// that can happen is that we'll get a bunch of closely
// spaced simulated overflows, but that's OK, in fact
// probably good as it would exercise the overflow code
// under contention.
bool ParNewGeneration::should_simulate_overflow() {
  if (_overflow_counter-- <= 0) { // just being defensive
    _overflow_counter = ParGCWorkQueueOverflowInterval;
    return true;
  } else {
    return false;
  }
}
#endif
// In case we are using compressed oops, we need to be careful.
// If the object being pushed is an object array, then its length
// field keeps track of the "grey boundary" at which the next
// incremental scan will be done (see ParGCArrayScanChunk).
// When using compressed oops, this length field is kept in the
// lower 32 bits of the erstwhile klass word and cannot be used
// for the overflow chaining pointer (OCP below).  As such the OCP
// would itself need to be compressed into the top 32 bits in this
// case.  Unfortunately, see below, in the event that we have a
// promotion failure, the node to be pushed on the list can be
// outside of the Java heap, so the heap-based pointer compression
// would not work (we would have potential aliasing between C-heap
// and Java-heap pointers).  For this reason, when using compressed
// oops, we simply use a worker-thread-local, non-shared overflow
// list in the form of a growable array, with a slightly different
// overflow stack draining strategy.  If/when we start using fat
// stacks here, we can go back to using (fat) pointer chains
// (although some performance comparisons would be useful since
// single global lists have their own performance disadvantages
// as we were made painfully aware not long ago, see 6786503).
#define BUSY (oop(0x1aff1aff))
void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) {
  assert(is_in_reserved(from_space_obj), "Should be from this generation");
  if (ParGCUseLocalOverflow) {
    // In the case of compressed oops, we use a private, not-shared
    // overflow stack.
    par_scan_state->push_on_overflow_stack(from_space_obj);
  } else {
    assert(!UseCompressedOops, "Error");
    // If the object has been forwarded to itself, then we cannot
    // use the klass pointer for the linked list.  Instead we have
    // to allocate an oopDesc in the C-heap and use that for the linked list.
    // XXX This is horribly inefficient when a promotion failure occurs
    // and should be fixed.  XXX FIX ME !!!
#ifndef PRODUCT
    Atomic::inc_ptr(&_num_par_pushes);
    assert(_num_par_pushes > 0, "Tautology");
#endif
    if (from_space_obj->forwardee() == from_space_obj) {
      oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1);
      listhead->forward_to(from_space_obj);
      from_space_obj = listhead;
    }
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list;
    do {
      cur_overflow_list = observed_overflow_list;
      if (cur_overflow_list != BUSY) {
        from_space_obj->set_klass_to_list_ptr(cur_overflow_list);
      } else {
        from_space_obj->set_klass_to_list_ptr(NULL);
      }
      observed_overflow_list =
        (oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list);
    } while (cur_overflow_list != observed_overflow_list);
  }
}

bool ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) {
  bool res;

  if (ParGCUseLocalOverflow) {
    res = par_scan_state->take_from_overflow_stack();
  } else {
    assert(!UseCompressedOops, "Error");
    res = take_from_overflow_list_work(par_scan_state);
  }
  return res;
}

// *NOTE*: The overflow list manipulation code here and
// in CMSCollector:: are very similar in shape,
// except that in the CMS case we thread the objects
// directly into the list via their mark word, and do
// not need to deal with special cases below related
// to chunking of object arrays and promotion failure
// handling.
// CR 6797058 has been filed to attempt consolidation of
// the common code.
// Because of the common code, if you make any changes in
// the code below, please check the CMS version to see if
// similar changes might be needed.
// See CMSCollector::par_take_from_overflow_list() for
// more extensive documentation comments.
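// The algorithm below, in outline:
//  1. Atomically swap BUSY into _overflow_list to claim the whole list,
//     spinning briefly if another thread got there first.
//  2. Walk a prefix of at most objsFromOverflow items off the claimed
//     chain (the chain is threaded through the klass words).
//  3. CAS the remaining suffix back onto _overflow_list, splicing it onto
//     whatever other threads may have pushed in the meantime.
//  4. Push the objects of the prefix onto this thread's work queue,
//     restoring each klass word and freeing any C-heap list nodes that
//     were created for self-forwarded (promotion-failed) objects.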
bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan_state) {
  ObjToScanQueue* work_q = par_scan_state->work_queue();
  // How many to take?
  size_t objsFromOverflow = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
                                 (size_t)ParGCDesiredObjsFromOverflowList);

  assert(!UseCompressedOops, "Error");
  assert(par_scan_state->overflow_stack() == NULL, "Error");
  if (_overflow_list == NULL) return false;

  // Otherwise, there was something there; try claiming the list.
  oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
  // Trim off a prefix of at most objsFromOverflow items
  Thread* tid = Thread::current();
  size_t spin_count = (size_t)ParallelGCThreads;
  size_t sleep_time_millis = MAX2((size_t)1, objsFromOverflow/100);
  for (size_t spin = 0; prefix == BUSY && spin < spin_count; spin++) {
    // someone grabbed it before we did ...
    // ... we spin for a short while...
    os::sleep(tid, sleep_time_millis, false);
    if (_overflow_list == NULL) {
      // nothing left to take
      return false;
    } else if (_overflow_list != BUSY) {
      // try and grab the prefix
      prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
    }
  }
  if (prefix == NULL || prefix == BUSY) {
    // Nothing to take or waited long enough.
    if (prefix == NULL) {
      // Write back the NULL in case we overwrote it with BUSY above
      // and it is still the same value.
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
    return false;
  }
  assert(prefix != NULL && prefix != BUSY, "Error");
  size_t i = 1;
  oop cur = prefix;
  while (i < objsFromOverflow && cur->klass_or_null() != NULL) {
    i++; cur = oop(cur->klass());
  }

  // Reattach remaining (suffix) to overflow list.
  if (cur->klass_or_null() == NULL) {
    // Write back the NULL in lieu of the BUSY we wrote
    // above, if it is still the same value.
    if (_overflow_list == BUSY) {
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
  } else {
    assert(cur->klass_or_null() != BUSY, "Error");
    oop suffix = oop(cur->klass());   // suffix will be put back on global list
    cur->set_klass_to_list_ptr(NULL); // break off suffix
    // It's possible that the list is still in the empty (BUSY) state
    // we left it in a short while ago; in that case we may be
    // able to place back the suffix.
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list = observed_overflow_list;
    bool attached = false;
    while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
      observed_overflow_list =
        (oop) Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
      if (cur_overflow_list == observed_overflow_list) {
        attached = true;
        break;
      } else cur_overflow_list = observed_overflow_list;
    }
    if (!attached) {
      // Too bad, someone else got in between; we'll need to do a splice.
      // Find the last item of suffix list.
      oop last = suffix;
      while (last->klass_or_null() != NULL) {
        last = oop(last->klass());
      }
      // Atomically prepend suffix to current overflow list.
      observed_overflow_list = _overflow_list;
      do {
        cur_overflow_list = observed_overflow_list;
        if (cur_overflow_list != BUSY) {
          // Do the splice ...
          last->set_klass_to_list_ptr(cur_overflow_list);
        } else {  // cur_overflow_list == BUSY
          last->set_klass_to_list_ptr(NULL);
        }
        observed_overflow_list =
          (oop)Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
      } while (cur_overflow_list != observed_overflow_list);
    }
  }

  // Push objects on prefix list onto this thread's work queue.
  assert(prefix != NULL && prefix != BUSY, "program logic");
  cur = prefix;
  ssize_t n = 0;
  while (cur != NULL) {
    oop obj_to_push = cur->forwardee();
    oop next        = oop(cur->klass_or_null());
    cur->set_klass(obj_to_push->klass());
    // This may be an array object that is self-forwarded.  In that case, the list pointer
    // space, cur, is not in the Java heap, but rather in the C-heap and should be freed.
    if (!is_in_reserved(cur)) {
      // This can become a scaling bottleneck when there is work queue overflow coincident
      // with promotion failure.
      oopDesc* f = cur;
      FREE_C_HEAP_ARRAY(oopDesc, f);
    } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = work_q->push(obj_to_push);
    assert(ok, "Should have succeeded");
    cur = next;
    n++;
  }
  TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n));
#ifndef PRODUCT
  assert(_num_par_pushes >= n, "Too many pops?");
  Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
#endif
  return true;
}
#undef BUSY

void ParNewGeneration::ref_processor_init()
{
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor.
    _ref_processor = ReferenceProcessor::create_ref_processor(
      _reserved,                  // span
      refs_discovery_is_atomic(), // atomic_discovery
      refs_discovery_is_mt(),     // mt_discovery
      NULL,                       // is_alive_non_header
      ParallelGCThreads,
      ParallelRefProcEnabled);
  }
}

const char* ParNewGeneration::name() const {
  return "par new generation";
}

bool ParNewGeneration::in_use() {
  return UseParNewGC && ParallelGCThreads > 0;
}