/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/cms/compactibleFreeListSpace.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.hpp"
#include "gc/cms/parNewGeneration.hpp"
#include "gc/cms/parOopClosures.inline.hpp"
#include "gc/serial/defNewGeneration.inline.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/ageTable.hpp"
#include "gc/shared/copyFailedInfo.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/generation.hpp"
#include "gc/shared/plab.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/workgroup.hpp"
#include "memory/resourceArea.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParScanThreadState::ParScanThreadState(Space* to_space_,
                                       ParNewGeneration* gen_,
                                       Generation* old_gen_,
                                       int thread_num_,
                                       ObjToScanQueueSet* work_queue_set_,
                                       Stack<oop, mtGC>* overflow_stacks_,
                                       size_t desired_plab_sz_,
                                       ParallelTaskTerminator& term_) :
  _to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_),
  _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
  _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
  _ageTable(false), // false ==> not the global age table, no perf data.
  _to_space_alloc_buffer(desired_plab_sz_),
  _to_space_closure(gen_, this), _old_gen_closure(gen_, this),
  _to_space_root_closure(gen_, this), _old_gen_root_closure(gen_, this),
  _older_gen_closure(gen_, this),
  _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
                      &_to_space_root_closure, gen_, &_old_gen_root_closure,
                      work_queue_set_, &term_),
  _is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this),
  _keep_alive_closure(&_scan_weak_ref_closure),
  _strong_roots_time(0.0), _term_time(0.0)
{
#if TASKQUEUE_STATS
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
#endif // TASKQUEUE_STATS

  _survivor_chunk_array =
    (ChunkArray*) old_gen()->get_data_recorder(thread_num());
  _hash_seed = 17;  // Might want to take time-based random value.
  _start = os::elapsedTime();
  _old_gen_closure.set_generation(old_gen_);
  _old_gen_root_closure.set_generation(old_gen_);
}
#ifdef _MSC_VER
#pragma warning( pop )
#endif

void ParScanThreadState::record_survivor_plab(HeapWord* plab_start,
                                              size_t plab_word_size) {
  ChunkArray* sca = survivor_chunk_array();
  if (sca != NULL) {
    // A non-null SCA implies that we want the PLAB data recorded.
    sca->record_sample(plab_start, plab_word_size);
  }
}

bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) const {
  return new_obj->is_objArray() &&
         arrayOop(new_obj)->length() > ParGCArrayScanChunk &&
         new_obj != old_obj;
}

void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
  assert(old->is_objArray(), "must be obj array");
  assert(old->is_forwarded(), "must be forwarded");
  assert(GenCollectedHeap::heap()->is_in_reserved(old), "must be in heap.");
  assert(!old_gen()->is_in(old), "must be in young generation.");

  objArrayOop obj = objArrayOop(old->forwardee());
  // Process ParGCArrayScanChunk elements now
  // and push the remainder back onto queue
  int start = arrayOop(old)->length();
  int end = obj->length();
  int remainder = end - start;
  assert(start <= end, "just checking");
  if (remainder > 2 * ParGCArrayScanChunk) {
    // Test above combines last partial chunk with a full chunk
    end = start + ParGCArrayScanChunk;
    arrayOop(old)->set_length(end);
    // Push remainder.
    bool ok = work_queue()->push(old);
    assert(ok, "just popped, push must be okay");
  } else {
    // Restore length so that it can be used if there
    // is a promotion failure and forwarding pointers
    // must be removed.
    arrayOop(old)->set_length(end);
  }

  // process our set of indices (include header in first chunk)
  // should make sure end is even (aligned to HeapWord in case of compressed oops)
  if ((HeapWord *)obj < young_old_boundary()) {
    // object is in to_space
    obj->oop_iterate_range(&_to_space_closure, start, end);
  } else {
    // object is in old generation
    obj->oop_iterate_range(&_old_gen_closure, start, end);
  }
}


void ParScanThreadState::trim_queues(int max_size) {
  ObjToScanQueue* queue = work_queue();
  do {
    while (queue->size() > (juint)max_size) {
      oop obj_to_scan;
      if (queue->pop_local(obj_to_scan)) {
        if ((HeapWord *)obj_to_scan < young_old_boundary()) {
          if (obj_to_scan->is_objArray() &&
              obj_to_scan->is_forwarded() &&
              obj_to_scan->forwardee() != obj_to_scan) {
            scan_partial_array_and_push_remainder(obj_to_scan);
          } else {
            // object is in to_space
            obj_to_scan->oop_iterate(&_to_space_closure);
          }
        } else {
          // object is in old generation
          obj_to_scan->oop_iterate(&_old_gen_closure);
        }
      }
    }
    // For the case of compressed oops, we have a private, non-shared
    // overflow stack, so we eagerly drain it so as to more evenly
    // distribute load early. Note: this may be good to do in
    // general rather than delay for the final stealing phase.
    // If applicable, we'll transfer a set of objects over to our
    // work queue, allowing them to be stolen and draining our
    // private overflow stack.
  } while (ParGCTrimOverflow && young_gen()->take_from_overflow_list(this));
}

bool ParScanThreadState::take_from_overflow_stack() {
  assert(ParGCUseLocalOverflow, "Else should not call");
  assert(young_gen()->overflow_list() == NULL, "Error");
  ObjToScanQueue* queue = work_queue();
  Stack<oop, mtGC>* const of_stack = overflow_stack();
  const size_t num_overflow_elems = of_stack->size();
  const size_t space_available = queue->max_elems() - queue->size();
  const size_t num_take_elems = MIN3(space_available / 4,
                                     ParGCDesiredObjsFromOverflowList,
                                     num_overflow_elems);
  // Transfer the most recent num_take_elems from the overflow
  // stack to our work queue.
  for (size_t i = 0; i != num_take_elems; i++) {
    oop cur = of_stack->pop();
    oop obj_to_push = cur->forwardee();
    assert(GenCollectedHeap::heap()->is_in_reserved(cur), "Should be in heap");
    assert(!old_gen()->is_in_reserved(cur), "Should be in young gen");
    assert(GenCollectedHeap::heap()->is_in_reserved(obj_to_push), "Should be in heap");
    if (should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = queue->push(obj_to_push);
    assert(ok, "Should have succeeded");
  }
  assert(young_gen()->overflow_list() == NULL, "Error");
  return num_take_elems > 0; // was something transferred?
}

void ParScanThreadState::push_on_overflow_stack(oop p) {
  assert(ParGCUseLocalOverflow, "Else should not call");
  overflow_stack()->push(p);
  assert(young_gen()->overflow_list() == NULL, "Error");
}

HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {

  // Otherwise, if the object is small enough, try to reallocate the
  // buffer.
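  // Returns NULL when to-space allocation fails here; the caller
  // (copy_to_survivor_space) then falls back to promoting the object
  // into the old generation.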
  HeapWord* obj = NULL;
  if (!_to_space_full) {
    PLAB* const plab = to_space_alloc_buffer();
    Space* const sp = to_space();
    if (word_sz * 100 <
        ParallelGCBufferWastePct * plab->word_sz()) {
      // Is small enough; abandon this buffer and start a new one.
      plab->retire();
      size_t buf_size = plab->word_sz();
      HeapWord* buf_space = sp->par_allocate(buf_size);
      if (buf_space == NULL) {
        const size_t min_bytes =
          PLAB::min_size() << LogHeapWordSize;
        size_t free_bytes = sp->free();
        while(buf_space == NULL && free_bytes >= min_bytes) {
          buf_size = free_bytes >> LogHeapWordSize;
          assert(buf_size == (size_t)align_object_size(buf_size),
                 "Invariant");
          buf_space  = sp->par_allocate(buf_size);
          free_bytes = sp->free();
        }
      }
      if (buf_space != NULL) {
        plab->set_word_size(buf_size);
        plab->set_buf(buf_space);
        record_survivor_plab(buf_space, buf_size);
        obj = plab->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
        // Note that we cannot compare buf_size < word_sz below
        // because of AlignmentReserve (see PLAB::allocate()).
        assert(obj != NULL || plab->words_remaining() < word_sz,
               "Else should have been able to allocate");
        // It's conceivable that we may be able to use the
        // buffer we just grabbed for subsequent small requests
        // even if not for this one.
      } else {
        // We're used up.
        _to_space_full = true;
      }

    } else {
      // Too large; allocate the object individually.
      obj = sp->par_allocate(word_sz);
    }
  }
  return obj;
}


void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj, size_t word_sz) {
  to_space_alloc_buffer()->undo_allocation(obj, word_sz);
}

void ParScanThreadState::print_promotion_failure_size() {
  if (_promotion_failed_info.has_failed() && PrintPromotionFailure) {
    gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ",
                        _thread_num, _promotion_failed_info.first_size());
  }
}

class ParScanThreadStateSet: private ResourceArray {
public:
  // Initializes states for the specified number of threads;
  ParScanThreadStateSet(int num_threads,
                        Space& to_space,
                        ParNewGeneration& gen,
                        Generation& old_gen,
                        ObjToScanQueueSet& queue_set,
                        Stack<oop, mtGC>* overflow_stacks_,
                        size_t desired_plab_sz,
                        ParallelTaskTerminator& term);

  ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }

  inline ParScanThreadState& thread_state(int i);

  void trace_promotion_failed(const YoungGCTracer* gc_tracer);
  void reset(uint active_workers, bool promotion_failed);
  void flush();

#if TASKQUEUE_STATS
  static void
    print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_termination_stats(outputStream* const st = gclog_or_tty);
  static void
    print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_taskqueue_stats(outputStream* const st = gclog_or_tty);
  void reset_stats();
#endif // TASKQUEUE_STATS

private:
  ParallelTaskTerminator& _term;
  ParNewGeneration&       _gen;
  Generation&             _old_gen;
public:
  bool is_valid(int id) const { return id < length(); }
  ParallelTaskTerminator* terminator() { return &_term; }
};


ParScanThreadStateSet::ParScanThreadStateSet(
  int num_threads, Space& to_space, ParNewGeneration& gen,
  Generation& old_gen, ObjToScanQueueSet& queue_set,
  Stack<oop, mtGC>* overflow_stacks,
  size_t desired_plab_sz, ParallelTaskTerminator& term)
  : ResourceArray(sizeof(ParScanThreadState), num_threads),
    _gen(gen), _old_gen(old_gen), _term(term)
{
  assert(num_threads > 0, "sanity check!");
  assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
         "overflow_stack allocation mismatch");
  // Initialize states.
  for (int i = 0; i < num_threads; ++i) {
    new ((ParScanThreadState*)_data + i)
        ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
                           overflow_stacks, desired_plab_sz, term);
  }
}

inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
{
  assert(i >= 0 && i < length(), "sanity check!");
  return ((ParScanThreadState*)_data)[i];
}

void ParScanThreadStateSet::trace_promotion_failed(const YoungGCTracer* gc_tracer) {
  for (int i = 0; i < length(); ++i) {
    if (thread_state(i).promotion_failed()) {
      gc_tracer->report_promotion_failed(thread_state(i).promotion_failed_info());
      thread_state(i).promotion_failed_info().reset();
    }
  }
}

void ParScanThreadStateSet::reset(uint active_threads, bool promotion_failed)
{
  _term.reset_for_reuse(active_threads);
  if (promotion_failed) {
    for (int i = 0; i < length(); ++i) {
      thread_state(i).print_promotion_failure_size();
    }
  }
}

#if TASKQUEUE_STATS
void
ParScanThreadState::reset_stats()
{
  taskqueue_stats().reset();
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
}

void ParScanThreadStateSet::reset_stats()
{
  for (int i = 0; i < length(); ++i) {
    thread_state(i).reset_stats();
  }
}

void
ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st)
{
  st->print_raw_cr("GC Termination Stats");
  st->print_raw_cr("     elapsed  --strong roots-- "
                   "-------termination-------");
  st->print_raw_cr("thr     ms        ms       %   "
                   "    ms       %   attempts");
  st->print_raw_cr("--- --------- --------- ------ "
                   "--------- ------ --------");
}

void ParScanThreadStateSet::print_termination_stats(outputStream* const st)
{
  print_termination_stats_hdr(st);

  for (int i = 0; i < length(); ++i) {
    const ParScanThreadState & pss = thread_state(i);
    const double elapsed_ms = pss.elapsed_time() * 1000.0;
    const double s_roots_ms = pss.strong_roots_time() * 1000.0;
    const double term_ms = pss.term_time() * 1000.0;
    st->print_cr("%3d %9.2f %9.2f %6.2f "
                 "%9.2f %6.2f " SIZE_FORMAT_W(8),
                 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
                 term_ms, term_ms * 100 / elapsed_ms, pss.term_attempts());
  }
}

// Print stats related to work queue activity.
void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st)
{
  st->print_raw_cr("GC Task Stats");
  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
}

void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st)
{
  print_taskqueue_stats_hdr(st);

  TaskQueueStats totals;
  for (int i = 0; i < length(); ++i) {
    const ParScanThreadState & pss = thread_state(i);
    const TaskQueueStats & stats = pss.taskqueue_stats();
    st->print("%3d ", i); stats.print(st); st->cr();
    totals += stats;

    if (pss.overflow_refills() > 0) {
      st->print_cr("    " SIZE_FORMAT_W(10) " overflow refills    "
                   SIZE_FORMAT_W(10) " overflow objects",
                   pss.overflow_refills(), pss.overflow_refill_objs());
    }
  }
  st->print("tot "); totals.print(st); st->cr();

  DEBUG_ONLY(totals.verify());
}
#endif // TASKQUEUE_STATS

void ParScanThreadStateSet::flush()
{
  // Work in this loop should be kept as lightweight as
  // possible since this might otherwise become a bottleneck
  // to scaling. Should we add heavy-weight work into this
  // loop, consider parallelizing the loop into the worker threads.
  for (int i = 0; i < length(); ++i) {
    ParScanThreadState& par_scan_state = thread_state(i);

    // Flush stats related to To-space PLAB activity and
    // retire the last buffer.
    par_scan_state.to_space_alloc_buffer()->flush_and_retire_stats(_gen.plab_stats());

    // Every thread has its own age table. We need to merge
    // them all into one.
    ageTable *local_table = par_scan_state.age_table();
    _gen.age_table()->merge(local_table);

    // Inform old gen that we're done.
    _old_gen.par_promote_alloc_done(i);
    _old_gen.par_oop_since_save_marks_iterate_done(i);
  }

  if (UseConcMarkSweepGC) {
    // We need to call this even when ResizeOldPLAB is disabled
    // so as to avoid breaking some asserts. While we may be able
    // to avoid this by reorganizing the code a bit, I am loath
    // to do that unless we find cases where ergo leads to bad
    // performance.
    CFLS_LAB::compute_desired_plab_size();
  }
}

ParScanClosure::ParScanClosure(ParNewGeneration* g,
                               ParScanThreadState* par_scan_state) :
  OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ParScanWithBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, false); }
void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }

void ParScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, false); }
void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }

void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, true); }
void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }

void ParRootScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, true); }
void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }

ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
                                             ParScanThreadState* par_scan_state)
  : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
{}

void ParScanWeakRefClosure::do_oop(oop* p)       { ParScanWeakRefClosure::do_oop_work(p); }
void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }

#ifdef WIN32
#pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */
#endif

ParEvacuateFollowersClosure::ParEvacuateFollowersClosure(
    ParScanThreadState* par_scan_state_,
    ParScanWithoutBarrierClosure* to_space_closure_,
    ParScanWithBarrierClosure* old_gen_closure_,
    ParRootScanWithoutBarrierClosure* to_space_root_closure_,
    ParNewGeneration* par_gen_,
    ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_,
    ObjToScanQueueSet* task_queues_,
    ParallelTaskTerminator* terminator_) :

    _par_scan_state(par_scan_state_),
    _to_space_closure(to_space_closure_),
    _old_gen_closure(old_gen_closure_),
    _to_space_root_closure(to_space_root_closure_),
    _old_gen_root_closure(old_gen_root_closure_),
    _par_gen(par_gen_),
    _task_queues(task_queues_),
    _terminator(terminator_)
{}

void ParEvacuateFollowersClosure::do_void() {
  ObjToScanQueue* work_q = par_scan_state()->work_queue();

  while (true) {

    // Scan to-space and old-gen objs until we run out of both.
    oop obj_to_scan;
    par_scan_state()->trim_queues(0);

    // We have no local work, attempt to steal from other threads.

    // attempt to steal work from promoted.
    if (task_queues()->steal(par_scan_state()->thread_num(),
                             par_scan_state()->hash_seed(),
                             obj_to_scan)) {
      bool res = work_q->push(obj_to_scan);
      assert(res, "Empty queue should have room for a push.");

      // if successful, goto Start.
      continue;

      // try global overflow list.
    } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
      continue;
    }

    // Otherwise, offer termination.
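    // Time spent waiting in offer_termination() is accounted as
    // termination time and shows up in the termination statistics.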
    par_scan_state()->start_term_time();
    if (terminator()->offer_termination()) break;
    par_scan_state()->end_term_time();
  }
  assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
         "Broken overflow list?");
  // Finish the last termination pause.
  par_scan_state()->end_term_time();
}

ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* old_gen,
                             HeapWord* young_old_boundary, ParScanThreadStateSet* state_set,
                             StrongRootsScope* strong_roots_scope) :
    AbstractGangTask("ParNewGeneration collection"),
    _gen(gen), _old_gen(old_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set),
    _strong_roots_scope(strong_roots_scope)
{}

// Reset the terminator for the given number of
// active threads.
void ParNewGenTask::set_for_termination(uint active_workers) {
  _state_set->reset(active_workers, _gen->promotion_failed());
  // Should the heap be passed in? There's only 1 for now so
  // grab it instead.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->set_n_termination(active_workers);
}

void ParNewGenTask::work(uint worker_id) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  // Since this is being done in a separate thread, need new resource
  // and handle marks.
  ResourceMark rm;
  HandleMark hm;

  ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
  assert(_state_set->is_valid(worker_id), "Should not have been called");

  par_scan_state.set_young_old_boundary(_young_old_boundary);

  KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),
                                      gch->rem_set()->klass_rem_set());
  CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
                                           &par_scan_state.to_space_root_closure(),
                                           false);

  par_scan_state.start_strong_roots();
  gch->gen_process_roots(_strong_roots_scope,
                         _gen->level(),
                         true,  // Process younger gens, if any,
                                // as strong roots.
                         GenCollectedHeap::SO_ScavengeCodeCache,
                         GenCollectedHeap::StrongAndWeakRoots,
                         &par_scan_state.to_space_root_closure(),
                         &par_scan_state.older_gen_closure(),
                         &cld_scan_closure);

  par_scan_state.end_strong_roots();

  // "evacuate followers".
  par_scan_state.evacuate_followers_closure().do_void();
}

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParNewGeneration::
ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
  : DefNewGeneration(rs, initial_byte_size, level, "PCopy"),
  _overflow_list(NULL),
  _is_alive_closure(this),
  _plab_stats(YoungPLABSize, PLABWeight)
{
  NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
  NOT_PRODUCT(_num_par_pushes = 0;)
  _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
  guarantee(_task_queues != NULL, "task_queues allocation failure.");

  for (uint i1 = 0; i1 < ParallelGCThreads; i1++) {
    ObjToScanQueue *q = new ObjToScanQueue();
    guarantee(q != NULL, "work_queue Allocation failure.");
    _task_queues->register_queue(i1, q);
  }

  for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
    _task_queues->queue(i2)->initialize();

  _overflow_stacks = NULL;
  if (ParGCUseLocalOverflow) {

    // typedef to workaround NEW_C_HEAP_ARRAY macro, which can not deal
    // with ','
    typedef Stack<oop, mtGC> GCOopStack;

    _overflow_stacks = NEW_C_HEAP_ARRAY(GCOopStack, ParallelGCThreads, mtGC);
    for (size_t i = 0; i < ParallelGCThreads; ++i) {
      new (_overflow_stacks + i) Stack<oop, mtGC>();
    }
  }

  if (UsePerfData) {
    EXCEPTION_MARK;
    ResourceMark rm;

    const char* cname =
         PerfDataManager::counter_name(_gen_counters->name_space(), "threads");
    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None,
                                     ParallelGCThreads, CHECK);
  }
}
#ifdef _MSC_VER
#pragma warning( pop )
#endif

// ParNewGeneration::
ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}

template <class T>
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _par_cl->do_oop_nv(p);

  if (GenCollectedHeap::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(oop* p)       { ParKeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(narrowOop* p) { ParKeepAliveClosure::do_oop_work(p); }

// ParNewGeneration::
KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {}

template <class T>
void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _cl->do_oop_nv(p);

  if (GenCollectedHeap::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/KeepAliveClosure::do_oop(oop* p)       { KeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); }

template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if ((HeapWord*)obj < _boundary) {
      assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
      oop new_obj = obj->is_forwarded()
                      ? obj->forwardee()
                      : _g->DefNewGeneration::copy_to_survivor_space(obj);
      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
    }
    if (_gc_barrier) {
      // If p points to a younger generation, mark the card.
      if ((HeapWord*)obj < _gen_boundary) {
        _rs->write_ref_field_gc_par(p, obj);
      }
    }
  }
}

void ScanClosureWithParBarrier::do_oop(oop* p)       { ScanClosureWithParBarrier::do_oop_work(p); }
void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }

class ParNewRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
public:
  ParNewRefProcTaskProxy(ProcessTask& task,
                         ParNewGeneration& gen,
                         Generation& old_gen,
                         HeapWord* young_old_boundary,
                         ParScanThreadStateSet& state_set);

private:
  virtual void work(uint worker_id);
  virtual void set_for_termination(uint active_workers) {
    _state_set.terminator()->reset_for_reuse(active_workers);
  }
private:
  ParNewGeneration&      _gen;
  ProcessTask&           _task;
  Generation&            _old_gen;
  HeapWord*              _young_old_boundary;
  ParScanThreadStateSet& _state_set;
};

ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(ProcessTask& task,
                                               ParNewGeneration& gen,
                                               Generation& old_gen,
                                               HeapWord* young_old_boundary,
                                               ParScanThreadStateSet& state_set)
  : AbstractGangTask("ParNewGeneration parallel reference processing"),
    _gen(gen),
    _task(task),
    _old_gen(old_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
{
}

void ParNewRefProcTaskProxy::work(uint worker_id)
{
  ResourceMark rm;
  HandleMark hm;
  ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id);
  par_scan_state.set_young_old_boundary(_young_old_boundary);
  _task.work(worker_id, par_scan_state.is_alive_closure(),
             par_scan_state.keep_alive_closure(),
             par_scan_state.evacuate_followers_closure());
}

class ParNewRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _task;

public:
  ParNewRefEnqueueTaskProxy(EnqueueTask& task)
    : AbstractGangTask("ParNewGeneration parallel reference enqueue"),
      _task(task)
  { }

  virtual void work(uint worker_id)
  {
    _task.work(worker_id);
  }
};


void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
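  // Reset the per-thread states and the shared terminator for the
  // number of workers that will actually run the proxy task.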
  _state_set.reset(workers->active_workers(), _generation.promotion_failed());
  ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(),
                                 _generation.reserved().end(), _state_set);
  workers->run_task(&rp_task);
  _state_set.reset(0 /* bad value in debug if not reset */,
                   _generation.promotion_failed());
}

void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  ParNewRefEnqueueTaskProxy enq_task(task);
  workers->run_task(&enq_task);
}

void ParNewRefProcTaskExecutor::set_single_threaded_mode()
{
  _state_set.flush();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->set_par_threads(0);  // 0 ==> non-parallel.
  gch->save_marks();
}

ScanClosureWithParBarrier::
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
  ScanClosure(g, gc_barrier) {}

EvacuateFollowersClosureGeneral::
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
                                OopsInGenClosure* cur,
                                OopsInGenClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void EvacuateFollowersClosureGeneral::do_void() {
  do {
    // Beware: this call will lead to closure applications via virtual
    // calls.
    _gch->oop_since_save_marks_iterate(_level,
                                       _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}


// A Generation that does parallel young-gen collection.

void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set) {
  assert(_promo_failure_scan_stack.is_empty(), "post condition");
  _promo_failure_scan_stack.clear(true); // Clear cached segments.

  remove_forwarding_pointers();
  if (PrintGCDetails) {
    gclog_or_tty->print(" (promotion failed)");
  }
  // All the spaces are in play for mark-sweep.
  swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
  from()->set_next_compaction_space(to());
  gch->set_incremental_collection_failed();
  // Inform the next generation that a promotion failure occurred.
  _old_gen->promotion_failure_occurred();

  // Trace promotion failure in the parallel GC threads
  thread_state_set.trace_promotion_failed(gc_tracer());
  // Single threaded code may have reported promotion failure to the global state
  if (_promotion_failed_info.has_failed()) {
    _gc_tracer.report_promotion_failed(_promotion_failed_info);
  }
  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(gch->reset_promotion_should_fail();)
}

void ParNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start();

  AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need workgang for parallel work");
  uint active_workers =
       AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
                                               workers->active_workers(),
                                               Threads::number_of_non_daemon_threads());
  workers->set_active_workers(active_workers);
  _old_gen = gch->old_gen();

  // If the next generation is too full to accommodate worst-case promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  _gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
  gch->trace_heap_before_gc(gc_tracer());

  init_assuming_no_promotion_failure();

  if (UseAdaptiveSizePolicy) {
    set_survivor_overflow(false);
    size_policy->minor_collection_begin();
  }

  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, _gc_tracer.gc_id());
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->save_marks();
  assert(workers != NULL, "Need parallel worker threads.");
  uint n_workers = active_workers;

  // Set the correct parallelism (number of queues) in the reference processor
  ref_processor()->set_active_mt_degree(n_workers);

  // Always set the terminator for the active number of workers
  // because only those workers go through the termination protocol.
  ParallelTaskTerminator _term(n_workers, task_queues());
  ParScanThreadStateSet thread_state_set(workers->active_workers(),
                                         *to(), *this, *_old_gen, *task_queues(),
                                         _overflow_stacks, desired_plab_sz(), _term);

  {
    StrongRootsScope srs(n_workers);

    ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set, &srs);
    gch->set_par_threads(n_workers);
    gch->rem_set()->prepare_for_younger_refs_iterate(true);
    // It turns out that even when we're using 1 thread, doing the work in a
    // separate thread causes wide variance in run times. We can't help this
    // in the multi-threaded case, but we special-case n=1 here to get
    // repeatable measurements of the 1-thread overhead of the parallel code.
    if (n_workers > 1) {
      workers->run_task(&tsk);
    } else {
      tsk.work(0);
    }
  }

  thread_state_set.reset(0 /* Bad value in debug if not reset */,
                         promotion_failed());

  // Trace and reset failed promotion info.
  if (promotion_failed()) {
    thread_state_set.trace_promotion_failed(gc_tracer());
  }

  // Process (weak) reference objects found during scavenge.
  ReferenceProcessor* rp = ref_processor();
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);
  KeepAliveClosure keep_alive(&scan_weak_ref);
  ScanClosure               scan_without_gc_barrier(this, false);
  ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
  set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
  EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
    &scan_without_gc_barrier, &scan_with_gc_barrier);
  rp->setup_policy(clear_all_soft_refs);
  // Can the mt_degree be set later (at run_task() time would be best)?
  rp->set_active_mt_degree(active_workers);
  ReferenceProcessorStats stats;
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, &task_executor,
                                              _gc_timer, _gc_tracer.gc_id());
  } else {
    thread_state_set.flush();
    gch->set_par_threads(0);  // 0 ==> non-parallel.
    gch->save_marks();
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, NULL,
                                              _gc_timer, _gc_tracer.gc_id());
  }
  _gc_tracer.report_gc_reference_stats(stats);
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s). When a minor collection fails to collect
      // sufficient space resizing of the young generation can occur
      // and redistribute the spaces in the young generation. Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    size_policy->reset_gc_overhead_limit_count();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();
  } else {
    handle_promotion_failed(gch, thread_state_set);
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());

  if (ResizePLAB) {
    plab_stats()->adjust_desired_plab_sz(n_workers);
  }

  if (PrintGC && !PrintGCDetails) {
    gch->print_heap_change(gch_prev_used);
  }

  TASKQUEUE_STATS_ONLY(if (PrintTerminationStats) thread_state_set.print_termination_stats());
  TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) thread_state_set.print_taskqueue_stats());

  if (UseAdaptiveSizePolicy) {
    size_policy->minor_collection_end(gch->gc_cause());
    size_policy->avg_survived()->sample(from()->used());
  }

  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings and os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  rp->set_enqueuing_is_done(true);
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    rp->enqueue_discovered_references(&task_executor);
  } else {
    rp->enqueue_discovered_references(NULL);
  }
  rp->verify_no_references_recorded();

  gch->trace_heap_after_gc(gc_tracer());
  _gc_tracer.report_tenuring_threshold(tenuring_threshold());

  _gc_timer->register_gc_end();

  _gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}

static int sum;
void ParNewGeneration::waste_some_time() {
  for (int i = 0; i < 100; i++) {
    sum += i;
  }
}

static const oop ClaimedForwardPtr = cast_to_oop<intptr_t>(0x4);

// Because of concurrency, there are times where an object for which
// "is_forwarded()" is true contains an "interim" forwarding pointer
// value. Such a value will soon be overwritten with a real value.
// This method requires "obj" to have a forwarding pointer, and waits, if
// necessary for a real one to be inserted, and returns it.

oop ParNewGeneration::real_forwardee(oop obj) {
  oop forward_ptr = obj->forwardee();
  if (forward_ptr != ClaimedForwardPtr) {
    return forward_ptr;
  } else {
    return real_forwardee_slow(obj);
  }
}

oop ParNewGeneration::real_forwardee_slow(oop obj) {
  // Spin-read if it is claimed but not yet written by another thread.
  oop forward_ptr = obj->forwardee();
  while (forward_ptr == ClaimedForwardPtr) {
    waste_some_time();
    assert(obj->is_forwarded(), "precondition");
    forward_ptr = obj->forwardee();
  }
  return forward_ptr;
}

void ParNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    // We should really have separate per-worker stacks, rather
    // than use locking of a common pair of stacks.
    MutexLocker ml(ParGCRareEvent_lock);
    preserve_mark(obj, m);
  }
}

// Multiple GC threads may try to promote an object. If the object
// is successfully promoted, a forwarding pointer will be installed in
// the object in the young generation. This method claims the right
// to install the forwarding pointer before it copies the object,
// thus avoiding the need to undo the copy as in
// copy_to_survivor_space_avoiding_with_undo.

oop ParNewGeneration::copy_to_survivor_space(
        ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
  // In the sequential version, this assert also says that the object is
  // not forwarded. That might not be the case here. It is the case that
  // the caller observed it to be not forwarded at some time in the past.
  assert(is_in_reserved(old), "shouldn't be scavenging this oop");

  // The sequential code read "old->age()" below. That doesn't work here,
  // since the age is in the mark word, and that might be overwritten with
  // a forwarding pointer by a parallel thread. So we must save the mark
  // word in a local and then analyze it.
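  // A dummy object on the stack lets us reuse the regular mark-word
  // accessors (age(), is_forwarded()) on the saved copy of the mark.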
  oopDesc dummyOld;
  dummyOld.set_mark(m);
  assert(!dummyOld.is_forwarded(),
         "should not be called with forwarding pointer mark word.");

  oop new_obj = NULL;
  oop forward_ptr;

  // Try allocating obj in to-space (unless too old)
  if (dummyOld.age() < tenuring_threshold()) {
    new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
    if (new_obj == NULL) {
      set_survivor_overflow(true);
    }
  }

  if (new_obj == NULL) {
    // Either to-space is full or we decided to promote
    // try allocating obj tenured

    // Attempt to install a null forwarding pointer (atomically),
    // to claim the right to install the real forwarding pointer.
    forward_ptr = old->forward_to_atomic(ClaimedForwardPtr);
    if (forward_ptr != NULL) {
      // someone else beat us to it.
      return real_forwardee(old);
    }

    if (!_promotion_failed) {
      new_obj = _old_gen->par_promote(par_scan_state->thread_num(),
                                      old, m, sz);
    }

    if (new_obj == NULL) {
      // promotion failed, forward to self
      _promotion_failed = true;
      new_obj = old;

      preserve_mark_if_necessary(old, m);
      par_scan_state->register_promotion_failure(sz);
    }

    old->forward_to(new_obj);
    forward_ptr = NULL;
  } else {
    // Is in to-space; do copying ourselves.
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
    assert(GenCollectedHeap::heap()->is_in_reserved(new_obj), "illegal forwarding pointer value.");
    forward_ptr = old->forward_to_atomic(new_obj);
    // Restore the mark word copied above.
    new_obj->set_mark(m);
    // Increment age if obj still in new generation
    new_obj->incr_age();
    par_scan_state->age_table()->add(new_obj, sz);
  }
  assert(new_obj != NULL, "just checking");

#ifndef PRODUCT
  // This code must come after the CAS test, or it will print incorrect
  // information.
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
       is_in_reserved(new_obj) ? "copying" : "tenuring",
       new_obj->klass()->internal_name(), p2i(old), p2i(new_obj), new_obj->size());
  }
#endif

  if (forward_ptr == NULL) {
    oop obj_to_push = new_obj;
    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
      // Length field used as index of next element to be scanned.
      // Real length can be obtained from real_forwardee()
      arrayOop(old)->set_length(0);
      obj_to_push = old;
      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
             "push forwarded object");
    }
    // Push it on one of the queues of to-be-scanned objects.
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
      // Add stats for overflow pushes.
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("queue overflow!\n");
      }
      push_on_overflow_list(old, par_scan_state);
      TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
    }

    return new_obj;
  }

  // Oops. Someone beat us to it. Undo the allocation. Where did we
  // allocate it?
  if (is_in_reserved(new_obj)) {
    // Must be in to_space.
    assert(to()->is_in_reserved(new_obj), "Checking");
    if (forward_ptr == ClaimedForwardPtr) {
      // Wait to get the real forwarding pointer value.
      forward_ptr = real_forwardee(old);
    }
    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
  }

  return forward_ptr;
}

#ifndef PRODUCT
// It's OK to call this multi-threaded; the worst thing
// that can happen is that we'll get a bunch of closely
// spaced simulated overflows, but that's OK, in fact
// probably good as it would exercise the overflow code
// under contention.
bool ParNewGeneration::should_simulate_overflow() {
  if (_overflow_counter-- <= 0) { // just being defensive
    _overflow_counter = ParGCWorkQueueOverflowInterval;
    return true;
  } else {
    return false;
  }
}
#endif

// In case we are using compressed oops, we need to be careful.
// If the object being pushed is an object array, then its length
// field keeps track of the "grey boundary" at which the next
// incremental scan will be done (see ParGCArrayScanChunk).
// When using compressed oops, this length field is kept in the
// lower 32 bits of the erstwhile klass word and cannot be used
// for the overflow chaining pointer (OCP below). As such the OCP
// would itself need to be compressed into the top 32-bits in this
// case. Unfortunately, see below, in the event that we have a
// promotion failure, the node to be pushed on the list can be
// outside of the Java heap, so the heap-based pointer compression
// would not work (we would have potential aliasing between C-heap
// and Java-heap pointers). For this reason, when using compressed
// oops, we simply use a worker-thread-local, non-shared overflow
// list in the form of a growable array, with a slightly different
// overflow stack draining strategy. If/when we start using fat
// stacks here, we can go back to using (fat) pointer chains
// (although some performance comparisons would be useful since
// single global lists have their own performance disadvantages
// as we were made painfully aware not long ago, see 6786503).
#define BUSY (cast_to_oop<intptr_t>(0x1aff1aff))
void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) {
  assert(is_in_reserved(from_space_obj), "Should be from this generation");
  if (ParGCUseLocalOverflow) {
    // In the case of compressed oops, we use a private, not-shared
    // overflow stack.
    par_scan_state->push_on_overflow_stack(from_space_obj);
  } else {
    assert(!UseCompressedOops, "Error");
    // if the object has been forwarded to itself, then we cannot
    // use the klass pointer for the linked list. Instead we have
    // to allocate an oopDesc in the C-Heap and use that for the linked list.
    // XXX This is horribly inefficient when a promotion failure occurs
    // and should be fixed. XXX FIX ME !!!
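    // Debug-only accounting of how many objects are currently
    // threaded onto the global overflow list.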
#ifndef PRODUCT
    Atomic::inc_ptr(&_num_par_pushes);
    assert(_num_par_pushes > 0, "Tautology");
#endif
    if (from_space_obj->forwardee() == from_space_obj) {
      oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1, mtGC);
      listhead->forward_to(from_space_obj);
      from_space_obj = listhead;
    }
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list;
    do {
      cur_overflow_list = observed_overflow_list;
      if (cur_overflow_list != BUSY) {
        from_space_obj->set_klass_to_list_ptr(cur_overflow_list);
      } else {
        from_space_obj->set_klass_to_list_ptr(NULL);
      }
      observed_overflow_list =
        (oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list);
    } while (cur_overflow_list != observed_overflow_list);
  }
}

bool ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) {
  bool res;

  if (ParGCUseLocalOverflow) {
    res = par_scan_state->take_from_overflow_stack();
  } else {
    assert(!UseCompressedOops, "Error");
    res = take_from_overflow_list_work(par_scan_state);
  }
  return res;
}


// *NOTE*: The overflow list manipulation code here and
// in CMSCollector:: are very similar in shape,
// except that in the CMS case we thread the objects
// directly into the list via their mark word, and do
// not need to deal with special cases below related
// to chunking of object arrays and promotion failure
// handling.
// CR 6797058 has been filed to attempt consolidation of
// the common code.
// Because of the common code, if you make any changes in
// the code below, please check the CMS version to see if
// similar changes might be needed.
// See CMSCollector::par_take_from_overflow_list() for
// more extensive documentation comments.
bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan_state) {
  ObjToScanQueue* work_q = par_scan_state->work_queue();
  // How many to take?
  size_t objsFromOverflow = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
                                 (size_t)ParGCDesiredObjsFromOverflowList);

  assert(!UseCompressedOops, "Error");
  assert(par_scan_state->overflow_stack() == NULL, "Error");
  if (_overflow_list == NULL) return false;

  // Otherwise, there was something there; try claiming the list.
  oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
  // Trim off a prefix of at most objsFromOverflow items
  Thread* tid = Thread::current();
  size_t spin_count = (size_t)ParallelGCThreads;
  size_t sleep_time_millis = MAX2((size_t)1, objsFromOverflow/100);
  for (size_t spin = 0; prefix == BUSY && spin < spin_count; spin++) {
    // someone grabbed it before we did ...
    // ... we spin for a short while...
    os::sleep(tid, sleep_time_millis, false);
    if (_overflow_list == NULL) {
      // nothing left to take
      return false;
    } else if (_overflow_list != BUSY) {
      // try and grab the prefix
      prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
    }
  }
  if (prefix == NULL || prefix == BUSY) {
    // Nothing to take or waited long enough
    if (prefix == NULL) {
      // Write back the NULL in case we overwrote it with BUSY above
      // and it is still the same value.
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
    return false;
  }
  assert(prefix != NULL && prefix != BUSY, "Error");
  size_t i = 1;
  oop cur = prefix;
  while (i < objsFromOverflow && cur->klass_or_null() != NULL) {
    i++; cur = cur->list_ptr_from_klass();
  }

  // Reattach remaining (suffix) to overflow list
  if (cur->klass_or_null() == NULL) {
    // Write back the NULL in lieu of the BUSY we wrote
    // above, if it is still the same value.
    if (_overflow_list == BUSY) {
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
  } else {
    assert(cur->klass_or_null() != (Klass*)(address)BUSY, "Error");
    oop suffix = cur->list_ptr_from_klass();  // suffix will be put back on global list
    cur->set_klass_to_list_ptr(NULL);         // break off suffix
    // It's possible that the list is still in the empty(busy) state
    // we left it in a short while ago; in that case we may be
    // able to place back the suffix.
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list = observed_overflow_list;
    bool attached = false;
    while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
      observed_overflow_list =
        (oop) Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
      if (cur_overflow_list == observed_overflow_list) {
        attached = true;
        break;
      } else cur_overflow_list = observed_overflow_list;
    }
    if (!attached) {
      // Too bad, someone else got in in between; we'll need to do a splice.
      // Find the last item of suffix list
      oop last = suffix;
      while (last->klass_or_null() != NULL) {
        last = last->list_ptr_from_klass();
      }
      // Atomically prepend suffix to current overflow list
      observed_overflow_list = _overflow_list;
      do {
        cur_overflow_list = observed_overflow_list;
        if (cur_overflow_list != BUSY) {
          // Do the splice ...
          last->set_klass_to_list_ptr(cur_overflow_list);
        } else { // cur_overflow_list == BUSY
          last->set_klass_to_list_ptr(NULL);
        }
        observed_overflow_list =
          (oop)Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
      } while (cur_overflow_list != observed_overflow_list);
    }
  }

  // Push objects on prefix list onto this thread's work queue
  assert(prefix != NULL && prefix != BUSY, "program logic");
  cur = prefix;
  ssize_t n = 0;
  while (cur != NULL) {
    oop obj_to_push = cur->forwardee();
    oop next        = cur->list_ptr_from_klass();
    cur->set_klass(obj_to_push->klass());
    // This may be an array object that is self-forwarded. In that case, the list pointer
    // space, cur, is not in the Java heap, but rather in the C-heap and should be freed.
    if (!is_in_reserved(cur)) {
      // This can become a scaling bottleneck when there is work queue overflow coincident
      // with promotion failure.
      oopDesc* f = cur;
      FREE_C_HEAP_ARRAY(oopDesc, f);
    } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = work_q->push(obj_to_push);
    assert(ok, "Should have succeeded");
    cur = next;
    n++;
  }
  TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n));
#ifndef PRODUCT
  assert(_num_par_pushes >= n, "Too many pops?");
  Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
#endif
  return true;
}
#undef BUSY

void ParNewGeneration::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor =
      new ReferenceProcessor(_reserved,                  // span
                             ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
                             (int) ParallelGCThreads,    // mt processing degree
                             refs_discovery_is_mt(),     // mt discovery
                             (int) ParallelGCThreads,    // mt discovery degree
                             refs_discovery_is_atomic(), // atomic_discovery
                             NULL);                      // is_alive_non_header
  }
}

const char* ParNewGeneration::name() const {
  return "par new generation";
}