/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/cms/cmsHeap.hpp"
#include "gc/cms/compactibleFreeListSpace.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.hpp"
#include "gc/cms/parNewGeneration.inline.hpp"
#include "gc/cms/parOopClosures.inline.hpp"
#include "gc/serial/defNewGeneration.inline.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/ageTable.inline.hpp"
#include "gc/shared/copyFailedInfo.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/generation.hpp"
#include "gc/shared/plab.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "gc/shared/workgroup.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"

ParScanThreadState::ParScanThreadState(Space* to_space_,
                                       ParNewGeneration* young_gen_,
                                       Generation* old_gen_,
                                       int thread_num_,
                                       ObjToScanQueueSet* work_queue_set_,
                                       Stack<oop, mtGC>* overflow_stacks_,
                                       PreservedMarks* preserved_marks_,
                                       size_t desired_plab_sz_,
                                       ParallelTaskTerminator& term_) :
  _to_space(to_space_),
  _old_gen(old_gen_),
  _young_gen(young_gen_),
  _thread_num(thread_num_),
  _work_queue(work_queue_set_->queue(thread_num_)),
  _to_space_full(false),
  _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
  _preserved_marks(preserved_marks_),
  _ageTable(false), // false ==> not the global age table, no perf data.
  _to_space_alloc_buffer(desired_plab_sz_),
  _to_space_closure(young_gen_, this),
  _old_gen_closure(young_gen_, this),
  _to_space_root_closure(young_gen_, this),
  _old_gen_root_closure(young_gen_, this),
  _older_gen_closure(young_gen_, this),
  _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
                      &_to_space_root_closure, young_gen_, &_old_gen_root_closure,
                      work_queue_set_, &term_),
  _is_alive_closure(young_gen_),
  _scan_weak_ref_closure(young_gen_, this),
  _keep_alive_closure(&_scan_weak_ref_closure),
  _strong_roots_time(0.0),
  _term_time(0.0)
{
#if TASKQUEUE_STATS
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
#endif // TASKQUEUE_STATS

  _survivor_chunk_array = (ChunkArray*) old_gen()->get_data_recorder(thread_num());
  _hash_seed = 17;  // Might want to take time-based random value.
  _start = os::elapsedTime();
  _old_gen_closure.set_generation(old_gen_);
  _old_gen_root_closure.set_generation(old_gen_);
}

void ParScanThreadState::record_survivor_plab(HeapWord* plab_start,
                                              size_t plab_word_size) {
  ChunkArray* sca = survivor_chunk_array();
  if (sca != NULL) {
    // A non-null SCA implies that we want the PLAB data recorded.
    sca->record_sample(plab_start, plab_word_size);
  }
}

bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) const {
  return new_obj->is_objArray() &&
         arrayOop(new_obj)->length() > ParGCArrayScanChunk &&
         new_obj != old_obj;
}

void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
  assert(old->is_objArray(), "must be obj array");
  assert(old->is_forwarded(), "must be forwarded");
  assert(CMSHeap::heap()->is_in_reserved(old), "must be in heap.");
  assert(!old_gen()->is_in(old), "must be in young generation.");

  objArrayOop obj = objArrayOop(old->forwardee());
  // Process ParGCArrayScanChunk elements now
  // and push the remainder back onto queue
  int start = arrayOop(old)->length();
  int end = obj->length();
  int remainder = end - start;
  assert(start <= end, "just checking");
  if (remainder > 2 * ParGCArrayScanChunk) {
    // Test above combines last partial chunk with a full chunk
    end = start + ParGCArrayScanChunk;
    arrayOop(old)->set_length(end);
    // Push remainder.
    bool ok = work_queue()->push(old);
    assert(ok, "just popped, push must be okay");
  } else {
    // Restore length so that it can be used if there
    // is a promotion failure and forwarding pointers
    // must be removed.
    arrayOop(old)->set_length(end);
  }

  // process our set of indices (include header in first chunk)
  // should make sure end is even (aligned to HeapWord in case of compressed oops)
  if ((HeapWord *)obj < young_old_boundary()) {
    // object is in to_space
    obj->oop_iterate_range(&_to_space_closure, start, end);
  } else {
    // object is in old generation
    obj->oop_iterate_range(&_old_gen_closure, start, end);
  }
}

void ParScanThreadState::trim_queues(int max_size) {
  ObjToScanQueue* queue = work_queue();
  do {
    while (queue->size() > (juint)max_size) {
      oop obj_to_scan;
      if (queue->pop_local(obj_to_scan)) {
        if ((HeapWord *)obj_to_scan < young_old_boundary()) {
          if (obj_to_scan->is_objArray() &&
              obj_to_scan->is_forwarded() &&
              obj_to_scan->forwardee() != obj_to_scan) {
            scan_partial_array_and_push_remainder(obj_to_scan);
          } else {
            // object is in to_space
            obj_to_scan->oop_iterate(&_to_space_closure);
          }
        } else {
          // object is in old generation
          obj_to_scan->oop_iterate(&_old_gen_closure);
        }
      }
    }
    // For the case of compressed oops, we have a private, non-shared
    // overflow stack, so we eagerly drain it so as to more evenly
    // distribute load early. Note: this may be good to do in
    // general rather than delay for the final stealing phase.
    // If applicable, we'll transfer a set of objects over to our
    // work queue, allowing them to be stolen and draining our
    // private overflow stack.
  } while (ParGCTrimOverflow && young_gen()->take_from_overflow_list(this));
}

bool ParScanThreadState::take_from_overflow_stack() {
  assert(ParGCUseLocalOverflow, "Else should not call");
  assert(young_gen()->overflow_list() == NULL, "Error");
  ObjToScanQueue* queue = work_queue();
  Stack<oop, mtGC>* const of_stack = overflow_stack();
  const size_t num_overflow_elems = of_stack->size();
  const size_t space_available = queue->max_elems() - queue->size();
  const size_t num_take_elems = MIN3(space_available / 4,
                                     ParGCDesiredObjsFromOverflowList,
                                     num_overflow_elems);
  // Transfer the most recent num_take_elems from the overflow
  // stack to our work queue.
  for (size_t i = 0; i != num_take_elems; i++) {
    oop cur = of_stack->pop();
    oop obj_to_push = cur->forwardee();
    assert(CMSHeap::heap()->is_in_reserved(cur), "Should be in heap");
    assert(!old_gen()->is_in_reserved(cur), "Should be in young gen");
    assert(CMSHeap::heap()->is_in_reserved(obj_to_push), "Should be in heap");
    if (should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = queue->push(obj_to_push);
    assert(ok, "Should have succeeded");
  }
  assert(young_gen()->overflow_list() == NULL, "Error");
  return num_take_elems > 0; // was something transferred?
}

void ParScanThreadState::push_on_overflow_stack(oop p) {
  assert(ParGCUseLocalOverflow, "Else should not call");
  overflow_stack()->push(p);
  assert(young_gen()->overflow_list() == NULL, "Error");
}

HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {
  // If the object is small enough, try to reallocate the buffer.
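  //
  // (Added note, not from the original sources.) The waste test below,
  //   word_sz * 100 < ParallelGCBufferWastePct * plab->word_sz(),
  // retires and refills the PLAB only when the request is a small
  // fraction of the buffer. Illustrative arithmetic with hypothetical
  // values: assuming ParallelGCBufferWastePct == 10 and a 4096-word
  // PLAB, any request under ~410 words recycles the buffer, while
  // larger requests fall through to a direct par_allocate() below.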
  HeapWord* obj = NULL;
  if (!_to_space_full) {
    PLAB* const plab = to_space_alloc_buffer();
    Space* const sp = to_space();
    if (word_sz * 100 < ParallelGCBufferWastePct * plab->word_sz()) {
      // Is small enough; abandon this buffer and start a new one.
      plab->retire();
      // The minimum size has to be twice SurvivorAlignmentInBytes to
      // allow for padding used in the alignment of 1 word. A padding
      // of 1 is too small for a filler word so the padding size will
      // be increased by SurvivorAlignmentInBytes.
      size_t min_usable_size = 2 * static_cast<size_t>(SurvivorAlignmentInBytes >> LogHeapWordSize);
      size_t buf_size = MAX2(plab->word_sz(), min_usable_size);
      HeapWord* buf_space = sp->par_allocate(buf_size);
      if (buf_space == NULL) {
        const size_t min_bytes = MAX2(PLAB::min_size(), min_usable_size) << LogHeapWordSize;
        size_t free_bytes = sp->free();
        while (buf_space == NULL && free_bytes >= min_bytes) {
          buf_size = free_bytes >> LogHeapWordSize;
          assert(buf_size == (size_t)align_object_size(buf_size), "Invariant");
          buf_space = sp->par_allocate(buf_size);
          free_bytes = sp->free();
        }
      }
      if (buf_space != NULL) {
        plab->set_buf(buf_space, buf_size);
        record_survivor_plab(buf_space, buf_size);
        obj = plab->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
        // Note that we cannot compare buf_size < word_sz below
        // because of AlignmentReserve (see PLAB::allocate()).
        assert(obj != NULL || plab->words_remaining() < word_sz,
               "Else should have been able to allocate requested object size "
               SIZE_FORMAT ", PLAB size " SIZE_FORMAT ", SurvivorAlignmentInBytes "
               SIZE_FORMAT ", words_remaining " SIZE_FORMAT,
               word_sz, buf_size, SurvivorAlignmentInBytes, plab->words_remaining());
        // It's conceivable that we may be able to use the
        // buffer we just grabbed for subsequent small requests
        // even if not for this one.
      } else {
        // We're used up.
        _to_space_full = true;
      }
    } else {
      // Too large; allocate the object individually.
      obj = sp->par_allocate(word_sz);
    }
  }
  return obj;
}

void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj, size_t word_sz) {
  to_space_alloc_buffer()->undo_allocation(obj, word_sz);
}

void ParScanThreadState::print_promotion_failure_size() {
  if (_promotion_failed_info.has_failed()) {
    log_trace(gc, promotion)(" (%d: promotion failure size = " SIZE_FORMAT ") ",
                             _thread_num, _promotion_failed_info.first_size());
  }
}

class ParScanThreadStateSet: StackObj {
public:
  // Initializes states for the specified number of threads.
  ParScanThreadStateSet(int num_threads,
                        Space& to_space,
                        ParNewGeneration& young_gen,
                        Generation& old_gen,
                        ObjToScanQueueSet& queue_set,
                        Stack<oop, mtGC>* overflow_stacks_,
                        PreservedMarksSet& preserved_marks_set,
                        size_t desired_plab_sz,
                        ParallelTaskTerminator& term);

  ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }

  inline ParScanThreadState& thread_state(int i);

  void trace_promotion_failed(const YoungGCTracer* gc_tracer);
  void reset(uint active_workers, bool promotion_failed);
  void flush();

#if TASKQUEUE_STATS
  static void
    print_termination_stats_hdr(outputStream* const st);
  void print_termination_stats();
  static void
    print_taskqueue_stats_hdr(outputStream* const st);
  void print_taskqueue_stats();
  void reset_stats();
#endif // TASKQUEUE_STATS

private:
  ParallelTaskTerminator& _term;
  ParNewGeneration&       _young_gen;
  Generation&             _old_gen;
  ParScanThreadState*     _per_thread_states;
  const int               _num_threads;
public:
  bool is_valid(int id) const { return id < _num_threads; }
  ParallelTaskTerminator* terminator() { return &_term; }
};

ParScanThreadStateSet::ParScanThreadStateSet(int num_threads,
                                             Space& to_space,
                                             ParNewGeneration& young_gen,
                                             Generation& old_gen,
                                             ObjToScanQueueSet& queue_set,
                                             Stack<oop, mtGC>* overflow_stacks,
                                             PreservedMarksSet& preserved_marks_set,
                                             size_t desired_plab_sz,
                                             ParallelTaskTerminator& term)
  : _young_gen(young_gen),
    _old_gen(old_gen),
    _term(term),
    _per_thread_states(NEW_RESOURCE_ARRAY(ParScanThreadState, num_threads)),
    _num_threads(num_threads)
{
  assert(num_threads > 0, "sanity check!");
  assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
         "overflow_stack allocation mismatch");
  // Initialize states.
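  //
  // (Added note, not from the original sources.) NEW_RESOURCE_ARRAY hands
  // back raw storage from the resource area without running constructors,
  // so each element is constructed below with placement new.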
  for (int i = 0; i < num_threads; ++i) {
    new(_per_thread_states + i)
      ParScanThreadState(&to_space, &young_gen, &old_gen, i, &queue_set,
                         overflow_stacks, preserved_marks_set.get(i),
                         desired_plab_sz, term);
  }
}

inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i) {
  assert(i >= 0 && i < _num_threads, "sanity check!");
  return _per_thread_states[i];
}

void ParScanThreadStateSet::trace_promotion_failed(const YoungGCTracer* gc_tracer) {
  for (int i = 0; i < _num_threads; ++i) {
    if (thread_state(i).promotion_failed()) {
      gc_tracer->report_promotion_failed(thread_state(i).promotion_failed_info());
      thread_state(i).promotion_failed_info().reset();
    }
  }
}

void ParScanThreadStateSet::reset(uint active_threads, bool promotion_failed) {
  _term.reset_for_reuse(active_threads);
  if (promotion_failed) {
    for (int i = 0; i < _num_threads; ++i) {
      thread_state(i).print_promotion_failure_size();
    }
  }
}

#if TASKQUEUE_STATS
void ParScanThreadState::reset_stats() {
  taskqueue_stats().reset();
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
}

void ParScanThreadStateSet::reset_stats() {
  for (int i = 0; i < _num_threads; ++i) {
    thread_state(i).reset_stats();
  }
}

void ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st) {
  st->print_raw_cr("GC Termination Stats");
  st->print_raw_cr("     elapsed  --strong roots-- -------termination-------");
  st->print_raw_cr("thr     ms        ms       %       ms       %   attempts");
  st->print_raw_cr("--- --------- --------- ------ --------- ------  --------");
}

void ParScanThreadStateSet::print_termination_stats() {
  Log(gc, task, stats) log;
  if (!log.is_debug()) {
    return;
  }

  ResourceMark rm;
  LogStream ls(log.debug());
  outputStream* st = &ls;

  print_termination_stats_hdr(st);

  for (int i = 0; i < _num_threads; ++i) {
    const ParScanThreadState & pss = thread_state(i);
    const double elapsed_ms = pss.elapsed_time() * 1000.0;
    const double s_roots_ms = pss.strong_roots_time() * 1000.0;
    const double term_ms = pss.term_time() * 1000.0;
    st->print_cr("%3d %9.2f %9.2f %6.2f %9.2f %6.2f " SIZE_FORMAT_W(8),
                 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
                 term_ms, term_ms * 100 / elapsed_ms, pss.term_attempts());
  }
}

// Print stats related to work queue activity.
void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st) {
  st->print_raw_cr("GC Task Stats");
  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
}

void ParScanThreadStateSet::print_taskqueue_stats() {
  if (!log_develop_is_enabled(Trace, gc, task, stats)) {
    return;
  }
  Log(gc, task, stats) log;
  ResourceMark rm;
  LogStream ls(log.trace());
  outputStream* st = &ls;
  print_taskqueue_stats_hdr(st);

  TaskQueueStats totals;
  for (int i = 0; i < _num_threads; ++i) {
    const ParScanThreadState & pss = thread_state(i);
    const TaskQueueStats & stats = pss.taskqueue_stats();
    st->print("%3d ", i); stats.print(st); st->cr();
    totals += stats;

    if (pss.overflow_refills() > 0) {
      st->print_cr("    " SIZE_FORMAT_W(10) " overflow refills    "
                   SIZE_FORMAT_W(10) " overflow objects",
                   pss.overflow_refills(), pss.overflow_refill_objs());
    }
  }
  st->print("tot "); totals.print(st); st->cr();

  DEBUG_ONLY(totals.verify());
}
#endif // TASKQUEUE_STATS

void ParScanThreadStateSet::flush() {
  // Work in this loop should be kept as lightweight as
  // possible since this might otherwise become a bottleneck
  // to scaling. Should we add heavy-weight work into this
  // loop, consider parallelizing the loop into the worker threads.
  for (int i = 0; i < _num_threads; ++i) {
    ParScanThreadState& par_scan_state = thread_state(i);

    // Flush stats related to To-space PLAB activity and
    // retire the last buffer.
    par_scan_state.to_space_alloc_buffer()->flush_and_retire_stats(_young_gen.plab_stats());

    // Every thread has its own age table. We need to merge
    // them all into one.
    AgeTable *local_table = par_scan_state.age_table();
    _young_gen.age_table()->merge(local_table);

    // Inform old gen that we're done.
    _old_gen.par_promote_alloc_done(i);
  }

  if (UseConcMarkSweepGC) {
    // We need to call this even when ResizeOldPLAB is disabled
    // so as to avoid breaking some asserts. While we may be able
    // to avoid this by reorganizing the code a bit, I am loathe
    // to do that unless we find cases where ergo leads to bad
    // performance.
    CompactibleFreeListSpaceLAB::compute_desired_plab_size();
  }
}

ParScanClosure::ParScanClosure(ParNewGeneration* g,
                               ParScanThreadState* par_scan_state) :
  OopsInClassLoaderDataOrGenClosure(g), _par_scan_state(par_scan_state), _g(g) {
  _boundary = _g->reserved().end();
}

void ParScanWithBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, false); }
void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }

void ParScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, false); }
void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }

void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, true); }
void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }

void ParRootScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, true); }
void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }

ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
                                             ParScanThreadState* par_scan_state)
  : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
{}

void ParScanWeakRefClosure::do_oop(oop* p)       { ParScanWeakRefClosure::do_oop_work(p); }
void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }

#ifdef WIN32
#pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */
#endif

ParEvacuateFollowersClosure::ParEvacuateFollowersClosure(
    ParScanThreadState* par_scan_state_,
    ParScanWithoutBarrierClosure* to_space_closure_,
    ParScanWithBarrierClosure* old_gen_closure_,
    ParRootScanWithoutBarrierClosure* to_space_root_closure_,
    ParNewGeneration* par_gen_,
    ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_,
    ObjToScanQueueSet* task_queues_,
    ParallelTaskTerminator* terminator_) :

    _par_scan_state(par_scan_state_),
    _to_space_closure(to_space_closure_),
    _old_gen_closure(old_gen_closure_),
    _to_space_root_closure(to_space_root_closure_),
    _old_gen_root_closure(old_gen_root_closure_),
    _par_gen(par_gen_),
    _task_queues(task_queues_),
    _terminator(terminator_)
{}

void ParEvacuateFollowersClosure::do_void() {
  ObjToScanQueue* work_q = par_scan_state()->work_queue();

  while (true) {
    // Scan to-space and old-gen objs until we run out of both.
    oop obj_to_scan;
    par_scan_state()->trim_queues(0);

    // We have no local work, attempt to steal from other threads.

    // Attempt to steal work from promoted.
    if (task_queues()->steal(par_scan_state()->thread_num(),
                             par_scan_state()->hash_seed(),
                             obj_to_scan)) {
      bool res = work_q->push(obj_to_scan);
      assert(res, "Empty queue should have room for a push.");

      // If successful, goto Start.
      continue;

      // Try global overflow list.
    } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
      continue;
    }

    // Otherwise, offer termination.
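    //
    // (Added note, not from the original sources.) The loop above drains
    // work in a fixed order: exhaust the local queue via trim_queues(0),
    // then try to steal from a peer's queue, then refill from the global
    // overflow list. Only when all three come up empty does this thread
    // offer termination below; a successful steal or refill restarts the
    // loop, and offer_termination() can return false so scanning resumes.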
    par_scan_state()->start_term_time();
    if (terminator()->offer_termination()) break;
    par_scan_state()->end_term_time();
  }
  assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
         "Broken overflow list?");
  // Finish the last termination pause.
  par_scan_state()->end_term_time();
}

ParNewGenTask::ParNewGenTask(ParNewGeneration* young_gen,
                             Generation* old_gen,
                             HeapWord* young_old_boundary,
                             ParScanThreadStateSet* state_set,
                             StrongRootsScope* strong_roots_scope) :
    AbstractGangTask("ParNewGeneration collection"),
    _young_gen(young_gen), _old_gen(old_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set),
    _strong_roots_scope(strong_roots_scope)
{}

void ParNewGenTask::work(uint worker_id) {
  CMSHeap* heap = CMSHeap::heap();
  // Since this is being done in a separate thread, need new resource
  // and handle marks.
  ResourceMark rm;
  HandleMark hm;

  ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
  assert(_state_set->is_valid(worker_id), "Should not have been called");

  par_scan_state.set_young_old_boundary(_young_old_boundary);

  CLDScanClosure cld_scan_closure(&par_scan_state.to_space_root_closure(),
                                  heap->rem_set()->cld_rem_set()->accumulate_modified_oops());

  par_scan_state.start_strong_roots();
  heap->young_process_roots(_strong_roots_scope,
                            &par_scan_state.to_space_root_closure(),
                            &par_scan_state.older_gen_closure(),
                            &cld_scan_closure);

  par_scan_state.end_strong_roots();

  // "evacuate followers".
  par_scan_state.evacuate_followers_closure().do_void();

  // This will collapse this worker's promoted object list that's
  // created during the main ParNew parallel phase of ParNew. This has
  // to be called after all workers have finished promoting objects
  // and scanning promoted objects. It should be safe calling it from
  // here, given that we can only reach here after all threads have
  // offered termination, i.e., after there is no more work to be
  // done. It will also disable promotion tracking for the rest of
  // this GC as it's not necessary to be on during reference processing.
  _old_gen->par_oop_since_save_marks_iterate_done((int) worker_id);
}

ParNewGeneration::ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
  : DefNewGeneration(rs, initial_byte_size, "PCopy"),
    _overflow_list(NULL),
    _is_alive_closure(this),
    _plab_stats("Young", YoungPLABSize, PLABWeight)
{
  NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
  NOT_PRODUCT(_num_par_pushes = 0;)
  _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
  guarantee(_task_queues != NULL, "task_queues allocation failure.");

  for (uint i = 0; i < ParallelGCThreads; i++) {
    ObjToScanQueue *q = new ObjToScanQueue();
    guarantee(q != NULL, "work_queue Allocation failure.");
    _task_queues->register_queue(i, q);
  }

  for (uint i = 0; i < ParallelGCThreads; i++) {
    _task_queues->queue(i)->initialize();
  }

  _overflow_stacks = NULL;
  if (ParGCUseLocalOverflow) {
    // typedef to workaround NEW_C_HEAP_ARRAY macro, which can not deal with ','
    typedef Stack<oop, mtGC> GCOopStack;

    _overflow_stacks = NEW_C_HEAP_ARRAY(GCOopStack, ParallelGCThreads, mtGC);
    for (size_t i = 0; i < ParallelGCThreads; ++i) {
      new (_overflow_stacks + i) Stack<oop, mtGC>();
    }
  }

  if (UsePerfData) {
    EXCEPTION_MARK;
    ResourceMark rm;

    const char* cname =
      PerfDataManager::counter_name(_gen_counters->name_space(), "threads");
    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None,
                                     ParallelGCThreads, CHECK);
  }
}

// ParNewGeneration::
ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}

template <class T>
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(oopDesc::is_oop(obj), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _par_cl->do_oop_nv(p);

  if (CMSHeap::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(oop* p)       { ParKeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(narrowOop* p) { ParKeepAliveClosure::do_oop_work(p); }

// ParNewGeneration::
KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {}

template <class T>
void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(oopDesc::is_oop(obj), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _cl->do_oop_nv(p);

  if (CMSHeap::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/KeepAliveClosure::do_oop(oop* p)       { KeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); }

template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if ((HeapWord*)obj < _boundary) {
      assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
      oop new_obj = obj->is_forwarded()
                      ? obj->forwardee()
                      : _g->DefNewGeneration::copy_to_survivor_space(obj);
      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
    }
    if (_gc_barrier) {
      // If p points to a younger generation, mark the card.
      if ((HeapWord*)obj < _gen_boundary) {
        _rs->write_ref_field_gc_par(p, obj);
      }
    }
  }
}

void ScanClosureWithParBarrier::do_oop(oop* p)       { ScanClosureWithParBarrier::do_oop_work(p); }
void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }

class ParNewRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
public:
  ParNewRefProcTaskProxy(ProcessTask& task,
                         ParNewGeneration& young_gen,
                         Generation& old_gen,
                         HeapWord* young_old_boundary,
                         ParScanThreadStateSet& state_set);

private:
  virtual void work(uint worker_id);
private:
  ParNewGeneration&      _young_gen;
  ProcessTask&           _task;
  Generation&            _old_gen;
  HeapWord*              _young_old_boundary;
  ParScanThreadStateSet& _state_set;
};

ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(ProcessTask& task,
                                               ParNewGeneration& young_gen,
                                               Generation& old_gen,
                                               HeapWord* young_old_boundary,
                                               ParScanThreadStateSet& state_set)
  : AbstractGangTask("ParNewGeneration parallel reference processing"),
    _young_gen(young_gen),
    _task(task),
    _old_gen(old_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
{ }

void ParNewRefProcTaskProxy::work(uint worker_id) {
  ResourceMark rm;
  HandleMark hm;
  ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id);
  par_scan_state.set_young_old_boundary(_young_old_boundary);
  _task.work(worker_id, par_scan_state.is_alive_closure(),
             par_scan_state.keep_alive_closure(),
             par_scan_state.evacuate_followers_closure());
}

class ParNewRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _task;

public:
  ParNewRefEnqueueTaskProxy(EnqueueTask& task)
    : AbstractGangTask("ParNewGeneration parallel reference enqueue"),
      _task(task)
  { }

  virtual void work(uint worker_id) {
    _task.work(worker_id);
  }
};

void ParNewRefProcTaskExecutor::execute(ProcessTask& task) {
  CMSHeap* gch = CMSHeap::heap();
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  _state_set.reset(workers->active_workers(), _young_gen.promotion_failed());
  ParNewRefProcTaskProxy rp_task(task, _young_gen, _old_gen,
                                 _young_gen.reserved().end(), _state_set);
  workers->run_task(&rp_task);
  _state_set.reset(0 /* bad value in debug if not reset */,
                   _young_gen.promotion_failed());
}

void ParNewRefProcTaskExecutor::execute(EnqueueTask& task) {
  CMSHeap* gch = CMSHeap::heap();
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  ParNewRefEnqueueTaskProxy enq_task(task);
  workers->run_task(&enq_task);
}

void ParNewRefProcTaskExecutor::set_single_threaded_mode() {
  _state_set.flush();
  CMSHeap* heap = CMSHeap::heap();
  heap->save_marks();
}

ScanClosureWithParBarrier::
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
  ScanClosure(g, gc_barrier)
{ }

EvacuateFollowersClosureGeneral::
EvacuateFollowersClosureGeneral(CMSHeap* heap,
                                OopsInGenClosure* cur,
                                OopsInGenClosure* older) :
  _heap(heap),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{ }

void EvacuateFollowersClosureGeneral::do_void() {
  do {
    // Beware: this call will lead to closure applications via virtual
    // calls.
    _heap->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen,
                                        _scan_cur_or_nonheap,
                                        _scan_older);
  } while (!_heap->no_allocs_since_save_marks());
}

// A Generation that does parallel young-gen collection.

void ParNewGeneration::handle_promotion_failed(CMSHeap* gch, ParScanThreadStateSet& thread_state_set) {
  assert(_promo_failure_scan_stack.is_empty(), "post condition");
  _promo_failure_scan_stack.clear(true); // Clear cached segments.

  remove_forwarding_pointers();
  log_info(gc, promotion)("Promotion failed");
  // All the spaces are in play for mark-sweep.
  swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
  from()->set_next_compaction_space(to());
  gch->set_incremental_collection_failed();
  // Inform the next generation that a promotion failure occurred.
  _old_gen->promotion_failure_occurred();

  // Trace promotion failure in the parallel GC threads
  thread_state_set.trace_promotion_failed(gc_tracer());
  // Single threaded code may have reported promotion failure to the global state
  if (_promotion_failed_info.has_failed()) {
    _gc_tracer.report_promotion_failed(_promotion_failed_info);
  }
  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(gch->reset_promotion_should_fail();)
}

void ParNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  CMSHeap* gch = CMSHeap::heap();

  _gc_timer->register_gc_start();

  AdaptiveSizePolicy* size_policy = gch->size_policy();
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need workgang for parallel work");
  uint active_workers =
      AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
                                              workers->active_workers(),
                                              Threads::number_of_non_daemon_threads());
  active_workers = workers->update_active_workers(active_workers);
  log_info(gc, task)("Using %u workers of %u for evacuation", active_workers, workers->total_workers());

  _old_gen = gch->old_gen();

  // If the next generation is too full to accommodate worst-case promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  _gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
  gch->trace_heap_before_gc(gc_tracer());

  init_assuming_no_promotion_failure();

  if (UseAdaptiveSizePolicy) {
    set_survivor_overflow(false);
    size_policy->minor_collection_begin();
  }

  GCTraceTime(Trace, gc, phases) t1("ParNew", NULL, gch->gc_cause());

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->save_marks();

  // Set the correct parallelism (number of queues) in the reference processor
  ref_processor()->set_active_mt_degree(active_workers);

  // Need to initialize the preserved marks before the ThreadStateSet c'tor.
  _preserved_marks_set.init(active_workers);

  // Always set the terminator for the active number of workers
  // because only those workers go through the termination protocol.
  ParallelTaskTerminator _term(active_workers, task_queues());
  ParScanThreadStateSet thread_state_set(active_workers,
                                         *to(), *this, *_old_gen, *task_queues(),
                                         _overflow_stacks, _preserved_marks_set,
                                         desired_plab_sz(), _term);

  thread_state_set.reset(active_workers, promotion_failed());

  {
    StrongRootsScope srs(active_workers);

    ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set, &srs);
    gch->rem_set()->prepare_for_younger_refs_iterate(true);
    // It turns out that even when we're using 1 thread, doing the work in a
    // separate thread causes wide variance in run times. We can't help this
    // in the multi-threaded case, but we special-case n=1 here to get
    // repeatable measurements of the 1-thread overhead of the parallel code.
    // Might multiple workers ever be used? If yes, initialization
    // has been done such that the single threaded path should not be used.
    if (workers->total_workers() > 1) {
      workers->run_task(&tsk);
    } else {
      tsk.work(0);
    }
  }

  thread_state_set.reset(0 /* Bad value in debug if not reset */,
                         promotion_failed());

  // Trace and reset failed promotion info.
  if (promotion_failed()) {
    thread_state_set.trace_promotion_failed(gc_tracer());
  }

  // Process (weak) reference objects found during scavenge.
  ReferenceProcessor* rp = ref_processor();
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);
  KeepAliveClosure keep_alive(&scan_weak_ref);
  ScanClosure scan_without_gc_barrier(this, false);
  ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
  set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
  EvacuateFollowersClosureGeneral evacuate_followers(gch,
      &scan_without_gc_barrier, &scan_with_gc_barrier);
  rp->setup_policy(clear_all_soft_refs);
  // Can the mt_degree be set later (at run_task() time would be best)?
  rp->set_active_mt_degree(active_workers);
  ReferenceProcessorStats stats;
  ReferenceProcessorPhaseTimes pt(_gc_timer, rp->num_q());
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, &task_executor,
                                              &pt);
  } else {
    thread_state_set.flush();
    gch->save_marks();
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, NULL,
                                              &pt);
  }
  _gc_tracer.report_gc_reference_stats(stats);
  _gc_tracer.report_tenuring_threshold(tenuring_threshold());
  pt.print_all_references();

  assert(gch->no_allocs_since_save_marks(), "evacuation should be done at this point");

  WeakProcessor::weak_oops_do(&is_alive, &keep_alive);

  // Verify that the usage of keep_alive only forwarded
  // the oops and did not find anything new to copy.
  assert(gch->no_allocs_since_save_marks(), "unexpectedly copied objects");

  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s). When a young collection fails to collect
      // sufficient space resizing of the young generation can occur
      // and redistribute the spaces in the young generation. Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    size_policy->reset_gc_overhead_limit_count();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();
  } else {
    handle_promotion_failed(gch, thread_state_set);
  }
  _preserved_marks_set.reclaim();
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());

  plab_stats()->adjust_desired_plab_sz();

  TASKQUEUE_STATS_ONLY(thread_state_set.print_termination_stats());
  TASKQUEUE_STATS_ONLY(thread_state_set.print_taskqueue_stats());

  if (UseAdaptiveSizePolicy) {
    size_policy->minor_collection_end(gch->gc_cause());
    size_policy->avg_survived()->sample(from()->used());
  }

  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings and os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  rp->set_enqueuing_is_done(true);
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
    rp->enqueue_discovered_references(&task_executor, &pt);
  } else {
    rp->enqueue_discovered_references(NULL, &pt);
  }
  rp->verify_no_references_recorded();

  gch->trace_heap_after_gc(gc_tracer());

  pt.print_enqueue_phase();

  _gc_timer->register_gc_end();

  _gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}

size_t ParNewGeneration::desired_plab_sz() {
  return _plab_stats.desired_plab_sz(CMSHeap::heap()->workers()->active_workers());
}

static int sum;
void ParNewGeneration::waste_some_time() {
  for (int i = 0; i < 100; i++) {
    sum += i;
  }
}

static const oop ClaimedForwardPtr = cast_to_oop<intptr_t>(0x4);

// Because of concurrency, there are times where an object for which
// "is_forwarded()" is true contains an "interim" forwarding pointer
// value. Such a value will soon be overwritten with a real value.
// This method requires "obj" to have a forwarding pointer, and waits,
// if necessary, for a real one to be inserted, and returns it.

oop ParNewGeneration::real_forwardee(oop obj) {
  oop forward_ptr = obj->forwardee();
  if (forward_ptr != ClaimedForwardPtr) {
    return forward_ptr;
  } else {
    return real_forwardee_slow(obj);
  }
}

oop ParNewGeneration::real_forwardee_slow(oop obj) {
  // Spin-read if it is claimed but not yet written by another thread.
  oop forward_ptr = obj->forwardee();
  while (forward_ptr == ClaimedForwardPtr) {
    waste_some_time();
    assert(obj->is_forwarded(), "precondition");
    forward_ptr = obj->forwardee();
  }
  return forward_ptr;
}

// Multiple GC threads may try to promote an object. If the object
// is successfully promoted, a forwarding pointer will be installed in
// the object in the young generation. This method claims the right
// to install the forwarding pointer before it copies the object,
// thus avoiding the need to undo the copy as in
// copy_to_survivor_space_avoiding_with_undo.

oop ParNewGeneration::copy_to_survivor_space(ParScanThreadState* par_scan_state,
                                             oop old,
                                             size_t sz,
                                             markOop m) {
  // In the sequential version, this assert also says that the object is
  // not forwarded. That might not be the case here. It is the case that
  // the caller observed it to be not forwarded at some time in the past.
  assert(is_in_reserved(old), "shouldn't be scavenging this oop");

  // The sequential code read "old->age()" below. That doesn't work here,
  // since the age is in the mark word, and that might be overwritten with
  // a forwarding pointer by a parallel thread. So we must save the mark
  // word in a local and then analyze it.
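  //
  // (Added note, not from the original sources.) The stack-allocated
  // oopDesc below is seeded with the mark word saved by the caller, so
  // age() and is_forwarded() are asked about that snapshot rather than
  // the live header, which a racing thread may already have overwritten
  // with a forwarding pointer.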
  oopDesc dummyOld;
  dummyOld.set_mark(m);
  assert(!dummyOld.is_forwarded(),
         "should not be called with forwarding pointer mark word.");

  oop new_obj = NULL;
  oop forward_ptr;

  // Try allocating obj in to-space (unless too old)
  if (dummyOld.age() < tenuring_threshold()) {
    new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
    if (new_obj == NULL) {
      set_survivor_overflow(true);
    }
  }

  if (new_obj == NULL) {
    // Either to-space is full or we decided to promote: try allocating obj tenured.

    // Attempt to install a null forwarding pointer (atomically),
    // to claim the right to install the real forwarding pointer.
    forward_ptr = old->forward_to_atomic(ClaimedForwardPtr);
    if (forward_ptr != NULL) {
      // someone else beat us to it.
      return real_forwardee(old);
    }

    if (!_promotion_failed) {
      new_obj = _old_gen->par_promote(par_scan_state->thread_num(),
                                      old, m, sz);
    }

    if (new_obj == NULL) {
      // promotion failed, forward to self
      _promotion_failed = true;
      new_obj = old;

      par_scan_state->preserved_marks()->push_if_necessary(old, m);
      par_scan_state->register_promotion_failure(sz);
    }

    old->forward_to(new_obj);
    forward_ptr = NULL;
  } else {
    // Is in to-space; do copying ourselves.
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
    assert(CMSHeap::heap()->is_in_reserved(new_obj), "illegal forwarding pointer value.");
    forward_ptr = old->forward_to_atomic(new_obj);
    // Restore the mark word copied above.
    new_obj->set_mark(m);
    // Increment age if obj still in new generation
    new_obj->incr_age();
    par_scan_state->age_table()->add(new_obj, sz);
  }
  assert(new_obj != NULL, "just checking");

  // This code must come after the CAS test, or it will print incorrect
  // information.
  log_develop_trace(gc, scavenge)("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
                                  is_in_reserved(new_obj) ? "copying" : "tenuring",
                                  new_obj->klass()->internal_name(), p2i(old), p2i(new_obj), new_obj->size());

  if (forward_ptr == NULL) {
    oop obj_to_push = new_obj;
    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
      // Length field used as index of next element to be scanned.
      // Real length can be obtained from real_forwardee()
      arrayOop(old)->set_length(0);
      obj_to_push = old;
      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
             "push forwarded object");
    }
    // Push it on one of the queues of to-be-scanned objects.
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
      // Add stats for overflow pushes.
      log_develop_trace(gc)("Queue Overflow");
      push_on_overflow_list(old, par_scan_state);
      TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
    }

    return new_obj;
  }

  // Oops. Someone beat us to it. Undo the allocation. Where did we
  // allocate it?
  if (is_in_reserved(new_obj)) {
    // Must be in to_space.
    assert(to()->is_in_reserved(new_obj), "Checking");
    if (forward_ptr == ClaimedForwardPtr) {
      // Wait to get the real forwarding pointer value.
      forward_ptr = real_forwardee(old);
    }
    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
  }

  return forward_ptr;
}

#ifndef PRODUCT
// It's OK to call this multi-threaded; the worst thing
// that can happen is that we'll get a bunch of closely
// spaced simulated overflows, but that's OK, in fact
// probably good as it would exercise the overflow code
// under contention.
bool ParNewGeneration::should_simulate_overflow() {
  if (_overflow_counter-- <= 0) { // just being defensive
    _overflow_counter = ParGCWorkQueueOverflowInterval;
    return true;
  } else {
    return false;
  }
}
#endif

// In case we are using compressed oops, we need to be careful.
// If the object being pushed is an object array, then its length
// field keeps track of the "grey boundary" at which the next
// incremental scan will be done (see ParGCArrayScanChunk).
// When using compressed oops, this length field is kept in the
// lower 32 bits of the erstwhile klass word and cannot be used
// for the overflow chaining pointer (OCP below). As such the OCP
// would itself need to be compressed into the top 32-bits in this
// case. Unfortunately, see below, in the event that we have a
// promotion failure, the node to be pushed on the list can be
// outside of the Java heap, so the heap-based pointer compression
// would not work (we would have potential aliasing between C-heap
// and Java-heap pointers). For this reason, when using compressed
// oops, we simply use a worker-thread-local, non-shared overflow
// list in the form of a growable array, with a slightly different
// overflow stack draining strategy. If/when we start using fat
// stacks here, we can go back to using (fat) pointer chains
// (although some performance comparisons would be useful since
// single global lists have their own performance disadvantages
// as we were made painfully aware not long ago, see 6786503).
#define BUSY (cast_to_oop<intptr_t>(0x1aff1aff))
void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) {
  assert(is_in_reserved(from_space_obj), "Should be from this generation");
  if (ParGCUseLocalOverflow) {
    // In the case of compressed oops, we use a private, not-shared
    // overflow stack.
    par_scan_state->push_on_overflow_stack(from_space_obj);
  } else {
    assert(!UseCompressedOops, "Error");
    // if the object has been forwarded to itself, then we cannot
    // use the klass pointer for the linked list. Instead we have
    // to allocate an oopDesc in the C-Heap and use that for the linked list.
    // XXX This is horribly inefficient when a promotion failure occurs
    // and should be fixed. XXX FIX ME !!!
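    //
    // (Added note, not from the original sources.) The consumer side,
    // take_from_overflow_list_work(), restores each node's klass word
    // from its forwardee via cur->set_klass(obj_to_push->klass()). A
    // self-forwarded object is its own forwardee, so overwriting its
    // klass word with a list pointer could not be undone; hence the
    // dummy C-heap oopDesc below stands in for it on the list.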
#ifndef PRODUCT
    Atomic::inc(&_num_par_pushes);
    assert(_num_par_pushes > 0, "Tautology");
#endif
    if (from_space_obj->forwardee() == from_space_obj) {
      oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1, mtGC);
      listhead->forward_to(from_space_obj);
      from_space_obj = listhead;
    }
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list;
    do {
      cur_overflow_list = observed_overflow_list;
      if (cur_overflow_list != BUSY) {
        from_space_obj->set_klass_to_list_ptr(cur_overflow_list);
      } else {
        from_space_obj->set_klass_to_list_ptr(NULL);
      }
      observed_overflow_list =
        Atomic::cmpxchg((oopDesc*)from_space_obj, &_overflow_list, (oopDesc*)cur_overflow_list);
    } while (cur_overflow_list != observed_overflow_list);
  }
}

bool ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) {
  bool res;

  if (ParGCUseLocalOverflow) {
    res = par_scan_state->take_from_overflow_stack();
  } else {
    assert(!UseCompressedOops, "Error");
    res = take_from_overflow_list_work(par_scan_state);
  }
  return res;
}


// *NOTE*: The overflow list manipulation code here and
// in CMSCollector:: are very similar in shape,
// except that in the CMS case we thread the objects
// directly into the list via their mark word, and do
// not need to deal with special cases below related
// to chunking of object arrays and promotion failure
// handling.
// CR 6797058 has been filed to attempt consolidation of
// the common code.
// Because of the common code, if you make any changes in
// the code below, please check the CMS version to see if
// similar changes might be needed.
// See CMSCollector::par_take_from_overflow_list() for
// more extensive documentation comments.
bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan_state) {
  ObjToScanQueue* work_q = par_scan_state->work_queue();
  // How many to take?
  size_t objsFromOverflow = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
                                 (size_t)ParGCDesiredObjsFromOverflowList);

  assert(!UseCompressedOops, "Error");
  assert(par_scan_state->overflow_stack() == NULL, "Error");
  if (_overflow_list == NULL) return false;

  // Otherwise, there was something there; try claiming the list.
  oop prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
  // Trim off a prefix of at most objsFromOverflow items
  Thread* tid = Thread::current();
  size_t spin_count = ParallelGCThreads;
  size_t sleep_time_millis = MAX2((size_t)1, objsFromOverflow/100);
  for (size_t spin = 0; prefix == BUSY && spin < spin_count; spin++) {
    // someone grabbed it before we did ...
    // ... we spin for a short while...
    os::sleep(tid, sleep_time_millis, false);
    if (_overflow_list == NULL) {
      // nothing left to take
      return false;
    } else if (_overflow_list != BUSY) {
      // try and grab the prefix
      prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
    }
  }
  if (prefix == NULL || prefix == BUSY) {
    // Nothing to take or waited long enough
    if (prefix == NULL) {
      // Write back the NULL in case we overwrote it with BUSY above
      // and it is still the same value.
      (void) Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
    }
    return false;
  }
  assert(prefix != NULL && prefix != BUSY, "Error");
  oop cur = prefix;
  for (size_t i = 1; i < objsFromOverflow; ++i) {
    oop next = cur->list_ptr_from_klass();
    if (next == NULL) break;
    cur = next;
  }
  assert(cur != NULL, "Loop postcondition");

  // Reattach remaining (suffix) to overflow list
  oop suffix = cur->list_ptr_from_klass();
  if (suffix == NULL) {
    // Write back the NULL in lieu of the BUSY we wrote
    // above, if it is still the same value.
    if (_overflow_list == BUSY) {
      (void) Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
    }
  } else {
    assert(suffix != BUSY, "Error");
    // suffix will be put back on global list
    cur->set_klass_to_list_ptr(NULL); // break off suffix
    // It's possible that the list is still in the empty(busy) state
    // we left it in a short while ago; in that case we may be
    // able to place back the suffix.
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list = observed_overflow_list;
    bool attached = false;
    while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
      observed_overflow_list =
        Atomic::cmpxchg((oopDesc*)suffix, &_overflow_list, (oopDesc*)cur_overflow_list);
      if (cur_overflow_list == observed_overflow_list) {
        attached = true;
        break;
      } else cur_overflow_list = observed_overflow_list;
    }
    if (!attached) {
      // Too bad, someone else got in in between; we'll need to do a splice.
      // Find the last item of suffix list
      oop last = suffix;
      while (true) {
        oop next = last->list_ptr_from_klass();
        if (next == NULL) break;
        last = next;
      }
      // Atomically prepend suffix to current overflow list
      observed_overflow_list = _overflow_list;
      do {
        cur_overflow_list = observed_overflow_list;
        if (cur_overflow_list != BUSY) {
          // Do the splice ...
          last->set_klass_to_list_ptr(cur_overflow_list);
        } else { // cur_overflow_list == BUSY
          last->set_klass_to_list_ptr(NULL);
        }
        observed_overflow_list =
          Atomic::cmpxchg((oopDesc*)suffix, &_overflow_list, (oopDesc*)cur_overflow_list);
      } while (cur_overflow_list != observed_overflow_list);
    }
  }

  // Push objects on prefix list onto this thread's work queue
  assert(prefix != NULL && prefix != BUSY, "program logic");
  cur = prefix;
  ssize_t n = 0;
  while (cur != NULL) {
    oop obj_to_push = cur->forwardee();
    oop next = cur->list_ptr_from_klass();
    cur->set_klass(obj_to_push->klass());
    // This may be an array object that is self-forwarded. In that case, the list pointer
    // space, cur, is not in the Java heap, but rather in the C-heap and should be freed.
    if (!is_in_reserved(cur)) {
      // This can become a scaling bottleneck when there is work queue overflow coincident
      // with promotion failure.
      oopDesc* f = cur;
      FREE_C_HEAP_ARRAY(oopDesc, f);
    } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = work_q->push(obj_to_push);
    assert(ok, "Should have succeeded");
    cur = next;
    n++;
  }
  TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n));
#ifndef PRODUCT
  assert(_num_par_pushes >= n, "Too many pops?");
  Atomic::sub(n, &_num_par_pushes);
#endif
  return true;
}
#undef BUSY

void ParNewGeneration::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor =
      new ReferenceProcessor(_reserved,                  // span
                             ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
                             ParallelGCThreads,          // mt processing degree
                             refs_discovery_is_mt(),     // mt discovery
                             ParallelGCThreads,          // mt discovery degree
                             refs_discovery_is_atomic(), // atomic_discovery
                             NULL);                      // is_alive_non_header
  }
}

const char* ParNewGeneration::name() const {
  return "par new generation";
}

void ParNewGeneration::restore_preserved_marks() {
  SharedRestorePreservedMarksTaskExecutor task_executor(CMSHeap::heap()->workers());
  _preserved_marks_set.restore(&task_executor);
}