/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/stringTable.hpp"
#include "gc/cms/cmsHeap.inline.hpp"
#include "gc/cms/compactibleFreeListSpace.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.hpp"
#include "gc/cms/parNewGeneration.inline.hpp"
#include "gc/cms/parOopClosures.inline.hpp"
#include "gc/serial/defNewGeneration.inline.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/ageTable.inline.hpp"
#include "gc/shared/copyFailedInfo.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/generation.hpp"
#include "gc/shared/plab.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "gc/shared/workgroup.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"

ParScanThreadState::ParScanThreadState(Space* to_space_,
                                       ParNewGeneration* young_gen_,
                                       Generation* old_gen_,
                                       int thread_num_,
                                       ObjToScanQueueSet* work_queue_set_,
                                       Stack<oop, mtGC>* overflow_stacks_,
                                       PreservedMarks* preserved_marks_,
                                       size_t desired_plab_sz_,
                                       ParallelTaskTerminator& term_) :
  _to_space(to_space_),
  _old_gen(old_gen_),
  _young_gen(young_gen_),
  _thread_num(thread_num_),
  _work_queue(work_queue_set_->queue(thread_num_)),
  _to_space_full(false),
  _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
  _preserved_marks(preserved_marks_),
  _ageTable(false), // false ==> not the global age table, no perf data.
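  // Each worker keeps this private age table; ParScanThreadStateSet::flush()
  // merges the per-thread tables into the generation-wide one after the
  // parallel phase.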
  _to_space_alloc_buffer(desired_plab_sz_),
  _to_space_closure(young_gen_, this),
  _old_gen_closure(young_gen_, this),
  _to_space_root_closure(young_gen_, this),
  _old_gen_root_closure(young_gen_, this),
  _older_gen_closure(young_gen_, this),
  _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
                      &_to_space_root_closure, young_gen_, &_old_gen_root_closure,
                      work_queue_set_, &term_),
  _is_alive_closure(young_gen_),
  _scan_weak_ref_closure(young_gen_, this),
  _keep_alive_closure(&_scan_weak_ref_closure),
  _strong_roots_time(0.0),
  _term_time(0.0)
{
#if TASKQUEUE_STATS
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
#endif // TASKQUEUE_STATS

  _survivor_chunk_array = (ChunkArray*) old_gen()->get_data_recorder(thread_num());
  _hash_seed = 17;  // Might want to take time-based random value.
  _start = os::elapsedTime();
  _old_gen_closure.set_generation(old_gen_);
  _old_gen_root_closure.set_generation(old_gen_);
}

void ParScanThreadState::record_survivor_plab(HeapWord* plab_start,
                                              size_t plab_word_size) {
  ChunkArray* sca = survivor_chunk_array();
  if (sca != NULL) {
    // A non-null SCA implies that we want the PLAB data recorded.
    sca->record_sample(plab_start, plab_word_size);
  }
}

bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) const {
  return new_obj->is_objArray() &&
         arrayOop(new_obj)->length() > ParGCArrayScanChunk &&
         new_obj != old_obj;
}

void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
  assert(old->is_objArray(), "must be obj array");
  assert(old->is_forwarded(), "must be forwarded");
  assert(CMSHeap::heap()->is_in_reserved(old), "must be in heap.");
  assert(!old_gen()->is_in(old), "must be in young generation.");

  objArrayOop obj = objArrayOop(old->forwardee());
  // Process ParGCArrayScanChunk elements now
  // and push the remainder back onto queue.
  int start = arrayOop(old)->length();
  int end = obj->length();
  int remainder = end - start;
  assert(start <= end, "just checking");
  if (remainder > 2 * ParGCArrayScanChunk) {
    // Test above combines last partial chunk with a full chunk.
    end = start + ParGCArrayScanChunk;
    arrayOop(old)->set_length(end);
    // Push remainder.
    bool ok = work_queue()->push(old);
    assert(ok, "just popped, push must be okay");
  } else {
    // Restore length so that it can be used if there
    // is a promotion failure and forwarding pointers
    // must be removed.
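    // (The from-space copy's length field doubles as the scan cursor for
    // this chunking scheme; see the comment above push_on_overflow_list()
    // later in this file.)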
    arrayOop(old)->set_length(end);
  }

  // process our set of indices (include header in first chunk)
  // should make sure end is even (aligned to HeapWord in case of compressed oops)
  if ((HeapWord *)obj < young_old_boundary()) {
    // object is in to_space
    obj->oop_iterate_range(&_to_space_closure, start, end);
  } else {
    // object is in old generation
    obj->oop_iterate_range(&_old_gen_closure, start, end);
  }
}

void ParScanThreadState::trim_queues(int max_size) {
  ObjToScanQueue* queue = work_queue();
  do {
    while (queue->size() > (juint)max_size) {
      oop obj_to_scan;
      if (queue->pop_local(obj_to_scan)) {
        if ((HeapWord *)obj_to_scan < young_old_boundary()) {
          if (obj_to_scan->is_objArray() &&
              obj_to_scan->is_forwarded() &&
              obj_to_scan->forwardee() != obj_to_scan) {
            scan_partial_array_and_push_remainder(obj_to_scan);
          } else {
            // object is in to_space
            obj_to_scan->oop_iterate(&_to_space_closure);
          }
        } else {
          // object is in old generation
          obj_to_scan->oop_iterate(&_old_gen_closure);
        }
      }
    }
    // For the case of compressed oops, we have a private, non-shared
    // overflow stack, so we eagerly drain it so as to more evenly
    // distribute load early. Note: this may be good to do in
    // general rather than delay for the final stealing phase.
    // If applicable, we'll transfer a set of objects over to our
    // work queue, allowing them to be stolen and draining our
    // private overflow stack.
  } while (ParGCTrimOverflow && young_gen()->take_from_overflow_list(this));
}

bool ParScanThreadState::take_from_overflow_stack() {
  assert(ParGCUseLocalOverflow, "Else should not call");
  assert(young_gen()->overflow_list() == NULL, "Error");
  ObjToScanQueue* queue = work_queue();
  Stack<oop, mtGC>* const of_stack = overflow_stack();
  const size_t num_overflow_elems = of_stack->size();
  const size_t space_available = queue->max_elems() - queue->size();
  const size_t num_take_elems = MIN3(space_available / 4,
                                     ParGCDesiredObjsFromOverflowList,
                                     num_overflow_elems);
  // Transfer the most recent num_take_elems from the overflow
  // stack to our work queue.
  for (size_t i = 0; i != num_take_elems; i++) {
    oop cur = of_stack->pop();
    oop obj_to_push = cur->forwardee();
    assert(CMSHeap::heap()->is_in_reserved(cur), "Should be in heap");
    assert(!old_gen()->is_in_reserved(cur), "Should be in young gen");
    assert(CMSHeap::heap()->is_in_reserved(obj_to_push), "Should be in heap");
    if (should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = queue->push(obj_to_push);
    assert(ok, "Should have succeeded");
  }
  assert(young_gen()->overflow_list() == NULL, "Error");
  return num_take_elems > 0; // was something transferred?
}

void ParScanThreadState::push_on_overflow_stack(oop p) {
  assert(ParGCUseLocalOverflow, "Else should not call");
  overflow_stack()->push(p);
  assert(young_gen()->overflow_list() == NULL, "Error");
}

HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {
  // If the object is small enough, try to reallocate the buffer.
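  // "Small enough" means the request would waste less than
  // ParallelGCBufferWastePct percent of a full PLAB (see the test below);
  // such requests retire the current buffer and carve a new one out of
  // to-space, while larger requests are allocated directly in to-space.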
  HeapWord* obj = NULL;
  if (!_to_space_full) {
    PLAB* const plab = to_space_alloc_buffer();
    Space* const sp = to_space();
    if (word_sz * 100 < ParallelGCBufferWastePct * plab->word_sz()) {
      // Is small enough; abandon this buffer and start a new one.
      plab->retire();
      // The minimum size has to be twice SurvivorAlignmentInBytes to
      // allow for padding used in the alignment of 1 word.  A padding
      // of 1 is too small for a filler word so the padding size will
      // be increased by SurvivorAlignmentInBytes.
      size_t min_usable_size = 2 * static_cast<size_t>(SurvivorAlignmentInBytes >> LogHeapWordSize);
      size_t buf_size = MAX2(plab->word_sz(), min_usable_size);
      HeapWord* buf_space = sp->par_allocate(buf_size);
      if (buf_space == NULL) {
        const size_t min_bytes = MAX2(PLAB::min_size(), min_usable_size) << LogHeapWordSize;
        size_t free_bytes = sp->free();
        while(buf_space == NULL && free_bytes >= min_bytes) {
          buf_size = free_bytes >> LogHeapWordSize;
          assert(buf_size == (size_t)align_object_size(buf_size), "Invariant");
          buf_space = sp->par_allocate(buf_size);
          free_bytes = sp->free();
        }
      }
      if (buf_space != NULL) {
        plab->set_buf(buf_space, buf_size);
        record_survivor_plab(buf_space, buf_size);
        obj = plab->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
        // Note that we cannot compare buf_size < word_sz below
        // because of AlignmentReserve (see PLAB::allocate()).
        assert(obj != NULL || plab->words_remaining() < word_sz,
               "Else should have been able to allocate requested object size "
               SIZE_FORMAT ", PLAB size " SIZE_FORMAT ", SurvivorAlignmentInBytes "
               SIZE_FORMAT ", words_remaining " SIZE_FORMAT,
               word_sz, buf_size, SurvivorAlignmentInBytes, plab->words_remaining());
        // It's conceivable that we may be able to use the
        // buffer we just grabbed for subsequent small requests
        // even if not for this one.
      } else {
        // We're used up.
        _to_space_full = true;
      }
    } else {
      // Too large; allocate the object individually.
      obj = sp->par_allocate(word_sz);
    }
  }
  return obj;
}

void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj, size_t word_sz) {
  to_space_alloc_buffer()->undo_allocation(obj, word_sz);
}

void ParScanThreadState::print_promotion_failure_size() {
  if (_promotion_failed_info.has_failed()) {
    log_trace(gc, promotion)(" (%d: promotion failure size = " SIZE_FORMAT ") ",
                             _thread_num, _promotion_failed_info.first_size());
  }
}

class ParScanThreadStateSet: StackObj {
public:
  // Initializes states for the specified number of threads;
  ParScanThreadStateSet(int num_threads,
                        Space& to_space,
                        ParNewGeneration& young_gen,
                        Generation& old_gen,
                        ObjToScanQueueSet& queue_set,
                        Stack<oop, mtGC>* overflow_stacks_,
                        PreservedMarksSet& preserved_marks_set,
                        size_t desired_plab_sz,
                        ParallelTaskTerminator& term);

  ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }

  inline ParScanThreadState& thread_state(int i);

  void trace_promotion_failed(const YoungGCTracer* gc_tracer);
  void reset(uint active_workers, bool promotion_failed);
  void flush();

#if TASKQUEUE_STATS
  static void
    print_termination_stats_hdr(outputStream* const st);
  void print_termination_stats();
  static void
    print_taskqueue_stats_hdr(outputStream* const st);
  void print_taskqueue_stats();
  void reset_stats();
#endif // TASKQUEUE_STATS

private:
  ParallelTaskTerminator& _term;
  ParNewGeneration&       _young_gen;
  Generation&             _old_gen;
  ParScanThreadState*     _per_thread_states;
  const int               _num_threads;
public:
  bool is_valid(int id) const { return id < _num_threads; }
  ParallelTaskTerminator* terminator() { return &_term; }
};

ParScanThreadStateSet::ParScanThreadStateSet(int num_threads,
                                             Space& to_space,
                                             ParNewGeneration& young_gen,
                                             Generation& old_gen,
                                             ObjToScanQueueSet& queue_set,
                                             Stack<oop, mtGC>* overflow_stacks,
                                             PreservedMarksSet& preserved_marks_set,
                                             size_t desired_plab_sz,
                                             ParallelTaskTerminator& term)
  : _young_gen(young_gen),
    _old_gen(old_gen),
    _term(term),
    _per_thread_states(NEW_RESOURCE_ARRAY(ParScanThreadState, num_threads)),
    _num_threads(num_threads)
{
  assert(num_threads > 0, "sanity check!");
  assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
         "overflow_stack allocation mismatch");
  // Initialize states.
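  // NEW_RESOURCE_ARRAY above only reserves raw storage and runs no
  // constructors, so each state is placement-new'ed into its slot here.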
  for (int i = 0; i < num_threads; ++i) {
    new(_per_thread_states + i)
      ParScanThreadState(&to_space, &young_gen, &old_gen, i, &queue_set,
                         overflow_stacks, preserved_marks_set.get(i),
                         desired_plab_sz, term);
  }
}

inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i) {
  assert(i >= 0 && i < _num_threads, "sanity check!");
  return _per_thread_states[i];
}

void ParScanThreadStateSet::trace_promotion_failed(const YoungGCTracer* gc_tracer) {
  for (int i = 0; i < _num_threads; ++i) {
    if (thread_state(i).promotion_failed()) {
      gc_tracer->report_promotion_failed(thread_state(i).promotion_failed_info());
      thread_state(i).promotion_failed_info().reset();
    }
  }
}

void ParScanThreadStateSet::reset(uint active_threads, bool promotion_failed) {
  _term.reset_for_reuse(active_threads);
  if (promotion_failed) {
    for (int i = 0; i < _num_threads; ++i) {
      thread_state(i).print_promotion_failure_size();
    }
  }
}

#if TASKQUEUE_STATS
void ParScanThreadState::reset_stats() {
  taskqueue_stats().reset();
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
}

void ParScanThreadStateSet::reset_stats() {
  for (int i = 0; i < _num_threads; ++i) {
    thread_state(i).reset_stats();
  }
}

void ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st) {
  st->print_raw_cr("GC Termination Stats");
  st->print_raw_cr("     elapsed  --strong roots-- -------termination-------");
  st->print_raw_cr("thr     ms       ms       %       ms       %   attempts");
  st->print_raw_cr("--- --------- --------- ------ --------- ------ --------");
}

void ParScanThreadStateSet::print_termination_stats() {
  Log(gc, task, stats) log;
  if (!log.is_debug()) {
    return;
  }

  ResourceMark rm;
  LogStream ls(log.debug());
  outputStream* st = &ls;

  print_termination_stats_hdr(st);

  for (int i = 0; i < _num_threads; ++i) {
    const ParScanThreadState & pss = thread_state(i);
    const double elapsed_ms = pss.elapsed_time() * 1000.0;
    const double s_roots_ms = pss.strong_roots_time() * 1000.0;
    const double term_ms = pss.term_time() * 1000.0;
    st->print_cr("%3d %9.2f %9.2f %6.2f %9.2f %6.2f " SIZE_FORMAT_W(8),
                 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
                 term_ms, term_ms * 100 / elapsed_ms, pss.term_attempts());
  }
}

// Print stats related to work queue activity.
void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st) {
  st->print_raw_cr("GC Task Stats");
  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
}

void ParScanThreadStateSet::print_taskqueue_stats() {
  if (!log_develop_is_enabled(Trace, gc, task, stats)) {
    return;
  }
  Log(gc, task, stats) log;
  ResourceMark rm;
  LogStream ls(log.trace());
  outputStream* st = &ls;
  print_taskqueue_stats_hdr(st);

  TaskQueueStats totals;
  for (int i = 0; i < _num_threads; ++i) {
    const ParScanThreadState & pss = thread_state(i);
    const TaskQueueStats & stats = pss.taskqueue_stats();
    st->print("%3d ", i); stats.print(st); st->cr();
    totals += stats;

    if (pss.overflow_refills() > 0) {
      st->print_cr("    " SIZE_FORMAT_W(10) " overflow refills    "
                   SIZE_FORMAT_W(10) " overflow objects",
                   pss.overflow_refills(), pss.overflow_refill_objs());
    }
  }
  st->print("tot "); totals.print(st); st->cr();

  DEBUG_ONLY(totals.verify());
}
#endif // TASKQUEUE_STATS

void ParScanThreadStateSet::flush() {
  // Work in this loop should be kept as lightweight as
  // possible since this might otherwise become a bottleneck
  // to scaling. Should we add heavy-weight work into this
  // loop, consider parallelizing the loop into the worker threads.
  for (int i = 0; i < _num_threads; ++i) {
    ParScanThreadState& par_scan_state = thread_state(i);

    // Flush stats related to To-space PLAB activity and
    // retire the last buffer.
    par_scan_state.to_space_alloc_buffer()->flush_and_retire_stats(_young_gen.plab_stats());

    // Every thread has its own age table.  We need to merge
    // them all into one.
    AgeTable *local_table = par_scan_state.age_table();
    _young_gen.age_table()->merge(local_table);

    // Inform old gen that we're done.
    _old_gen.par_promote_alloc_done(i);
  }

  if (UseConcMarkSweepGC) {
    // We need to call this even when ResizeOldPLAB is disabled
    // so as to avoid breaking some asserts. While we may be able
    // to avoid this by reorganizing the code a bit, I am loathe
    // to do that unless we find cases where ergo leads to bad
    // performance.
    CompactibleFreeListSpaceLAB::compute_desired_plab_size();
  }
}

ParScanClosure::ParScanClosure(ParNewGeneration* g,
                               ParScanThreadState* par_scan_state) :
  OopsInClassLoaderDataOrGenClosure(g), _par_scan_state(par_scan_state), _g(g) {
  _boundary = _g->reserved().end();
}

void ParScanWithBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, false); }
void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }

void ParScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, false); }
void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }

void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, true); }
void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }

void ParRootScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, true); }
void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }

ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
                                             ParScanThreadState* par_scan_state)
  : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
{}

void ParScanWeakRefClosure::do_oop(oop* p)       { ParScanWeakRefClosure::do_oop_work(p); }
void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }

#ifdef WIN32
#pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */
#endif

ParEvacuateFollowersClosure::ParEvacuateFollowersClosure(
    ParScanThreadState* par_scan_state_,
    ParScanWithoutBarrierClosure* to_space_closure_,
    ParScanWithBarrierClosure* old_gen_closure_,
    ParRootScanWithoutBarrierClosure* to_space_root_closure_,
    ParNewGeneration* par_gen_,
    ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_,
    ObjToScanQueueSet* task_queues_,
    ParallelTaskTerminator* terminator_) :

    _par_scan_state(par_scan_state_),
    _to_space_closure(to_space_closure_),
    _old_gen_closure(old_gen_closure_),
    _to_space_root_closure(to_space_root_closure_),
    _old_gen_root_closure(old_gen_root_closure_),
    _par_gen(par_gen_),
    _task_queues(task_queues_),
    _terminator(terminator_)
{}

void ParEvacuateFollowersClosure::do_void() {
  ObjToScanQueue* work_q = par_scan_state()->work_queue();

  while (true) {
    // Scan to-space and old-gen objs until we run out of both.
    oop obj_to_scan;
    par_scan_state()->trim_queues(0);

    // We have no local work, attempt to steal from other threads.

    // Attempt to steal work from promoted.
    if (task_queues()->steal(par_scan_state()->thread_num(),
                             par_scan_state()->hash_seed(),
                             obj_to_scan)) {
      bool res = work_q->push(obj_to_scan);
      assert(res, "Empty queue should have room for a push.");

      // If successful, goto Start.
      continue;

      // Try global overflow list.
    } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
      continue;
    }

    // Otherwise, offer termination.
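    // Our queue, the steal targets, and the global overflow list are all
    // empty, so we enter the termination protocol: offer_termination()
    // should return true only once every worker has offered termination,
    // and return false if work reappears in the meantime, in which case
    // we loop back and resume scanning.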
    par_scan_state()->start_term_time();
    if (terminator()->offer_termination()) break;
    par_scan_state()->end_term_time();
  }
  assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
         "Broken overflow list?");
  // Finish the last termination pause.
  par_scan_state()->end_term_time();
}

ParNewGenTask::ParNewGenTask(ParNewGeneration* young_gen,
                             Generation* old_gen,
                             HeapWord* young_old_boundary,
                             ParScanThreadStateSet* state_set,
                             StrongRootsScope* strong_roots_scope) :
    AbstractGangTask("ParNewGeneration collection"),
    _young_gen(young_gen), _old_gen(old_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set),
    _strong_roots_scope(strong_roots_scope),
    _par_state_string(StringTable::weak_storage())
{}

void ParNewGenTask::work(uint worker_id) {
  CMSHeap* heap = CMSHeap::heap();
  // Since this is being done in a separate thread, need new resource
  // and handle marks.
  ResourceMark rm;
  HandleMark hm;

  ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
  assert(_state_set->is_valid(worker_id), "Should not have been called");

  par_scan_state.set_young_old_boundary(_young_old_boundary);

  CLDScanClosure cld_scan_closure(&par_scan_state.to_space_root_closure(),
                                  heap->rem_set()->cld_rem_set()->accumulate_modified_oops());

  par_scan_state.start_strong_roots();
  heap->young_process_roots(_strong_roots_scope,
                            &par_scan_state.to_space_root_closure(),
                            &par_scan_state.older_gen_closure(),
                            &cld_scan_closure,
                            &_par_state_string);

  par_scan_state.end_strong_roots();

  // "evacuate followers".
  par_scan_state.evacuate_followers_closure().do_void();

  // This will collapse this worker's promoted object list that's
  // created during the main ParNew parallel phase of ParNew. This has
  // to be called after all workers have finished promoting objects
  // and scanning promoted objects. It should be safe calling it from
  // here, given that we can only reach here after all threads have
  // offered termination, i.e., after there is no more work to be
  // done. It will also disable promotion tracking for the rest of
  // this GC as it's not necessary to be on during reference processing.
  _old_gen->par_oop_since_save_marks_iterate_done((int) worker_id);
}

ParNewGeneration::ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
  : DefNewGeneration(rs, initial_byte_size, "PCopy"),
  _overflow_list(NULL),
  _is_alive_closure(this),
  _plab_stats("Young", YoungPLABSize, PLABWeight)
{
  NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
  NOT_PRODUCT(_num_par_pushes = 0;)
  _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
  guarantee(_task_queues != NULL, "task_queues allocation failure.");

  for (uint i = 0; i < ParallelGCThreads; i++) {
    ObjToScanQueue *q = new ObjToScanQueue();
    guarantee(q != NULL, "work_queue Allocation failure.");
    _task_queues->register_queue(i, q);
  }

  for (uint i = 0; i < ParallelGCThreads; i++) {
    _task_queues->queue(i)->initialize();
  }

  _overflow_stacks = NULL;
  if (ParGCUseLocalOverflow) {
    // typedef to workaround NEW_C_HEAP_ARRAY macro, which can not deal with ','
    typedef Stack<oop, mtGC> GCOopStack;

    _overflow_stacks = NEW_C_HEAP_ARRAY(GCOopStack, ParallelGCThreads, mtGC);
    for (size_t i = 0; i < ParallelGCThreads; ++i) {
      new (_overflow_stacks + i) Stack<oop, mtGC>();
    }
  }

  if (UsePerfData) {
    EXCEPTION_MARK;
    ResourceMark rm;

    const char* cname =
      PerfDataManager::counter_name(_gen_counters->name_space(), "threads");
    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None,
                                     ParallelGCThreads, CHECK);
  }
}

// ParNewGeneration::
ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}

template <class T>
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(oopDesc::is_oop(obj), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _par_cl->do_oop_nv(p);

  if (CMSHeap::heap()->is_in_reserved(p)) {
    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(oop* p)       { ParKeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(narrowOop* p) { ParKeepAliveClosure::do_oop_work(p); }

// ParNewGeneration::
KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {}

template <class T>
void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(oopDesc::is_oop(obj), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _cl->do_oop_nv(p);

  if (CMSHeap::heap()->is_in_reserved(p)) {
    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/KeepAliveClosure::do_oop(oop* p)       { KeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); }

template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) {
  T heap_oop = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(heap_oop)) {
    oop obj = CompressedOops::decode_not_null(heap_oop);
    if ((HeapWord*)obj < _boundary) {
      assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
      oop new_obj = obj->is_forwarded()
                      ? obj->forwardee()
                      : _g->DefNewGeneration::copy_to_survivor_space(obj);
      RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
    }
    if (_gc_barrier) {
      // If p points to a younger generation, mark the card.
      if ((HeapWord*)obj < _gen_boundary) {
        _rs->write_ref_field_gc_par(p, obj);
      }
    }
  }
}

void ScanClosureWithParBarrier::do_oop(oop* p)       { ScanClosureWithParBarrier::do_oop_work(p); }
void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }

class ParNewRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
public:
  ParNewRefProcTaskProxy(ProcessTask& task,
                         ParNewGeneration& young_gen,
                         Generation& old_gen,
                         HeapWord* young_old_boundary,
                         ParScanThreadStateSet& state_set);

private:
  virtual void work(uint worker_id);
private:
  ParNewGeneration&      _young_gen;
  ProcessTask&           _task;
  Generation&            _old_gen;
  HeapWord*              _young_old_boundary;
  ParScanThreadStateSet& _state_set;
};

ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(ProcessTask& task,
                                               ParNewGeneration& young_gen,
                                               Generation& old_gen,
                                               HeapWord* young_old_boundary,
                                               ParScanThreadStateSet& state_set)
  : AbstractGangTask("ParNewGeneration parallel reference processing"),
    _young_gen(young_gen),
    _task(task),
    _old_gen(old_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
{ }

void ParNewRefProcTaskProxy::work(uint worker_id) {
  ResourceMark rm;
  HandleMark hm;
  ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id);
  par_scan_state.set_young_old_boundary(_young_old_boundary);
  _task.work(worker_id, par_scan_state.is_alive_closure(),
             par_scan_state.keep_alive_closure(),
             par_scan_state.evacuate_followers_closure());
}

void ParNewRefProcTaskExecutor::execute(ProcessTask& task) {
  CMSHeap* gch = CMSHeap::heap();
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  _state_set.reset(workers->active_workers(), _young_gen.promotion_failed());
  ParNewRefProcTaskProxy rp_task(task, _young_gen, _old_gen,
                                 _young_gen.reserved().end(), _state_set);
  workers->run_task(&rp_task);
  _state_set.reset(0 /* bad value in debug if not reset */,
                   _young_gen.promotion_failed());
}

void ParNewRefProcTaskExecutor::set_single_threaded_mode() {
  _state_set.flush();
  CMSHeap* heap = CMSHeap::heap();
  heap->save_marks();
}

ScanClosureWithParBarrier::
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
  ScanClosure(g, gc_barrier)
{ }

template <typename OopClosureType1, typename OopClosureType2>
EvacuateFollowersClosureGeneral<OopClosureType1, OopClosureType2>::
EvacuateFollowersClosureGeneral(CMSHeap* heap,
                                OopClosureType1* cur,
                                OopClosureType2* older) :
  _heap(heap),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{ }

template <typename OopClosureType1, typename OopClosureType2>
void EvacuateFollowersClosureGeneral<OopClosureType1, OopClosureType2>::do_void() {
  do {
    _heap->oop_since_save_marks_iterate(_scan_cur_or_nonheap,
                                        _scan_older);
  } while (!_heap->no_allocs_since_save_marks());
}

// A Generation that does parallel young-gen collection.

void ParNewGeneration::handle_promotion_failed(CMSHeap* gch, ParScanThreadStateSet& thread_state_set) {
  assert(_promo_failure_scan_stack.is_empty(), "post condition");
  _promo_failure_scan_stack.clear(true); // Clear cached segments.

  remove_forwarding_pointers();
  log_info(gc, promotion)("Promotion failed");
  // All the spaces are in play for mark-sweep.
  swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
  from()->set_next_compaction_space(to());
  gch->set_incremental_collection_failed();
  // Inform the next generation that a promotion failure occurred.
  _old_gen->promotion_failure_occurred();

  // Trace promotion failure in the parallel GC threads.
  thread_state_set.trace_promotion_failed(gc_tracer());
  // Single threaded code may have reported promotion failure to the global state.
  if (_promotion_failed_info.has_failed()) {
    _gc_tracer.report_promotion_failed(_promotion_failed_info);
  }
  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(gch->reset_promotion_should_fail();)
}

void ParNewGeneration::collect(bool full,
                               bool clear_all_soft_refs,
                               size_t size,
                               bool is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  CMSHeap* gch = CMSHeap::heap();

  _gc_timer->register_gc_start();

  AdaptiveSizePolicy* size_policy = gch->size_policy();
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need workgang for parallel work");
  uint active_workers =
      AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
                                              workers->active_workers(),
                                              Threads::number_of_non_daemon_threads());
  active_workers = workers->update_active_workers(active_workers);
  log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers->total_workers());

  _old_gen = gch->old_gen();

  // If the next generation is too full to accommodate worst-case promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  _gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
  gch->trace_heap_before_gc(gc_tracer());

  init_assuming_no_promotion_failure();

  if (UseAdaptiveSizePolicy) {
    set_survivor_overflow(false);
    size_policy->minor_collection_begin();
  }

  GCTraceTime(Trace, gc, phases) t1("ParNew", NULL, gch->gc_cause());

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->save_marks();

  // Set the correct parallelism (number of queues) in the reference processor
  ref_processor()->set_active_mt_degree(active_workers);

  // Need to initialize the preserved marks before the ThreadStateSet c'tor.
  _preserved_marks_set.init(active_workers);

  // Always set the terminator for the active number of workers
  // because only those workers go through the termination protocol.
  ParallelTaskTerminator _term(active_workers, task_queues());
  ParScanThreadStateSet thread_state_set(active_workers,
                                         *to(), *this, *_old_gen, *task_queues(),
                                         _overflow_stacks, _preserved_marks_set,
                                         desired_plab_sz(), _term);

  thread_state_set.reset(active_workers, promotion_failed());

  {
    StrongRootsScope srs(active_workers);

    ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set, &srs);
    gch->rem_set()->prepare_for_younger_refs_iterate(true);
    // It turns out that even when we're using 1 thread, doing the work in a
    // separate thread causes wide variance in run times.  We can't help this
    // in the multi-threaded case, but we special-case n=1 here to get
    // repeatable measurements of the 1-thread overhead of the parallel code.
    // Might multiple workers ever be used?  If yes, initialization
    // has been done such that the single threaded path should not be used.
    if (workers->total_workers() > 1) {
      workers->run_task(&tsk);
    } else {
      tsk.work(0);
    }
  }

  thread_state_set.reset(0 /* Bad value in debug if not reset */,
                         promotion_failed());

  // Trace and reset failed promotion info.
  if (promotion_failed()) {
    thread_state_set.trace_promotion_failed(gc_tracer());
  }

  // Process (weak) reference objects found during scavenge.
  ReferenceProcessor* rp = ref_processor();
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);
  KeepAliveClosure keep_alive(&scan_weak_ref);
  ScanClosure scan_without_gc_barrier(this, false);
  ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
  set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
  EvacuateFollowersClosureGeneral<ScanClosure, ScanClosureWithParBarrier> evacuate_followers(
      gch, &scan_without_gc_barrier, &scan_with_gc_barrier);
  rp->setup_policy(clear_all_soft_refs);
  // Can the mt_degree be set later (at run_task() time would be best)?
  rp->set_active_mt_degree(active_workers);
  ReferenceProcessorStats stats;
  ReferenceProcessorPhaseTimes pt(_gc_timer, rp->num_queues());
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, &task_executor,
                                              &pt);
  } else {
    thread_state_set.flush();
    gch->save_marks();
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, NULL,
                                              &pt);
  }
  _gc_tracer.report_gc_reference_stats(stats);
  _gc_tracer.report_tenuring_threshold(tenuring_threshold());
  pt.print_all_references();

  assert(gch->no_allocs_since_save_marks(), "evacuation should be done at this point");

  WeakProcessor::weak_oops_do(&is_alive, &keep_alive);

  // Verify that the usage of keep_alive only forwarded
  // the oops and did not find anything new to copy.
  assert(gch->no_allocs_since_save_marks(), "unexpectedly copied objects");

  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a young collection fails to collect
      // sufficient space resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    size_policy->reset_gc_overhead_limit_count();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();
  } else {
    handle_promotion_failed(gch, thread_state_set);
  }
  _preserved_marks_set.reclaim();
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());

  plab_stats()->adjust_desired_plab_sz();

  TASKQUEUE_STATS_ONLY(thread_state_set.print_termination_stats());
  TASKQUEUE_STATS_ONLY(thread_state_set.print_taskqueue_stats());

  if (UseAdaptiveSizePolicy) {
    size_policy->minor_collection_end(gch->gc_cause());
    size_policy->avg_survived()->sample(from()->used());
  }

  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings and os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  rp->set_enqueuing_is_done(true);
  rp->verify_no_references_recorded();

  gch->trace_heap_after_gc(gc_tracer());

  _gc_timer->register_gc_end();

  _gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}

size_t ParNewGeneration::desired_plab_sz() {
  return _plab_stats.desired_plab_sz(CMSHeap::heap()->workers()->active_workers());
}

static int sum;
void ParNewGeneration::waste_some_time() {
  for (int i = 0; i < 100; i++) {
    sum += i;
  }
}

static const oop ClaimedForwardPtr = cast_to_oop<intptr_t>(0x4);

// Because of concurrency, there are times where an object for which
// "is_forwarded()" is true contains an "interim" forwarding pointer
// value.  Such a value will soon be overwritten with a real value.
// This method requires "obj" to have a forwarding pointer, and waits, if
// necessary for a real one to be inserted, and returns it.

oop ParNewGeneration::real_forwardee(oop obj) {
  oop forward_ptr = obj->forwardee();
  if (forward_ptr != ClaimedForwardPtr) {
    return forward_ptr;
  } else {
    return real_forwardee_slow(obj);
  }
}

oop ParNewGeneration::real_forwardee_slow(oop obj) {
  // Spin-read if it is claimed but not yet written by another thread.
  oop forward_ptr = obj->forwardee();
  while (forward_ptr == ClaimedForwardPtr) {
    waste_some_time();
    assert(obj->is_forwarded(), "precondition");
    forward_ptr = obj->forwardee();
  }
  return forward_ptr;
}

// Multiple GC threads may try to promote an object.  If the object
// is successfully promoted, a forwarding pointer will be installed in
// the object in the young generation.  This method claims the right
// to install the forwarding pointer before it copies the object,
// thus avoiding the need to undo the copy as in
// copy_to_survivor_space_avoiding_with_undo.

oop ParNewGeneration::copy_to_survivor_space(ParScanThreadState* par_scan_state,
                                             oop old,
                                             size_t sz,
                                             markOop m) {
  // In the sequential version, this assert also says that the object is
  // not forwarded.  That might not be the case here.  It is the case that
  // the caller observed it to be not forwarded at some time in the past.
  assert(is_in_reserved(old), "shouldn't be scavenging this oop");

  // The sequential code read "old->age()" below.  That doesn't work here,
  // since the age is in the mark word, and that might be overwritten with
  // a forwarding pointer by a parallel thread.  So we must save the mark
  // word in a local and then analyze it.
  oopDesc dummyOld;
  dummyOld.set_mark_raw(m);
  assert(!dummyOld.is_forwarded(),
         "should not be called with forwarding pointer mark word.");

  oop new_obj = NULL;
  oop forward_ptr;

  // Try allocating obj in to-space (unless too old).
  if (dummyOld.age() < tenuring_threshold()) {
    new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
    if (new_obj == NULL) {
      set_survivor_overflow(true);
    }
  }

  if (new_obj == NULL) {
    // Either to-space is full or we decided to promote; try allocating obj tenured.

    // Attempt to install a null forwarding pointer (atomically),
    // to claim the right to install the real forwarding pointer.
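    // Losers of this race observe ClaimedForwardPtr through
    // real_forwardee() and spin in real_forwardee_slow() until the
    // winner has installed the real forwarding pointer.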
    forward_ptr = old->forward_to_atomic(ClaimedForwardPtr);
    if (forward_ptr != NULL) {
      // someone else beat us to it.
      return real_forwardee(old);
    }

    if (!_promotion_failed) {
      new_obj = _old_gen->par_promote(par_scan_state->thread_num(),
                                      old, m, sz);
    }

    if (new_obj == NULL) {
      // promotion failed, forward to self
      _promotion_failed = true;
      new_obj = old;

      par_scan_state->preserved_marks()->push_if_necessary(old, m);
      par_scan_state->register_promotion_failure(sz);
    }

    old->forward_to(new_obj);
    forward_ptr = NULL;
  } else {
    // Is in to-space; do copying ourselves.
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
    assert(CMSHeap::heap()->is_in_reserved(new_obj), "illegal forwarding pointer value.");
    forward_ptr = old->forward_to_atomic(new_obj);
    // Restore the mark word copied above.
    new_obj->set_mark_raw(m);
    // Increment age if obj still in new generation
    new_obj->incr_age();
    par_scan_state->age_table()->add(new_obj, sz);
  }
  assert(new_obj != NULL, "just checking");

  // This code must come after the CAS test, or it will print incorrect
  // information.
  log_develop_trace(gc, scavenge)("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
                                  is_in_reserved(new_obj) ? "copying" : "tenuring",
                                  new_obj->klass()->internal_name(), p2i(old), p2i(new_obj), new_obj->size());

  if (forward_ptr == NULL) {
    oop obj_to_push = new_obj;
    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
      // Length field used as index of next element to be scanned.
      // Real length can be obtained from real_forwardee()
      arrayOop(old)->set_length(0);
      obj_to_push = old;
      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
             "push forwarded object");
    }
    // Push it on one of the queues of to-be-scanned objects.
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
      // Add stats for overflow pushes.
      log_develop_trace(gc)("Queue Overflow");
      push_on_overflow_list(old, par_scan_state);
      TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
    }

    return new_obj;
  }

  // Oops.  Someone beat us to it.  Undo the allocation.  Where did we
  // allocate it?
  if (is_in_reserved(new_obj)) {
    // Must be in to_space.
    assert(to()->is_in_reserved(new_obj), "Checking");
    if (forward_ptr == ClaimedForwardPtr) {
      // Wait to get the real forwarding pointer value.
      forward_ptr = real_forwardee(old);
    }
    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
  }

  return forward_ptr;
}

#ifndef PRODUCT
// It's OK to call this multi-threaded;  the worst thing
// that can happen is that we'll get a bunch of closely
// spaced simulated overflows, but that's OK, in fact
// probably good as it would exercise the overflow code
// under contention.
bool ParNewGeneration::should_simulate_overflow() {
  if (_overflow_counter-- <= 0) { // just being defensive
    _overflow_counter = ParGCWorkQueueOverflowInterval;
    return true;
  } else {
    return false;
  }
}
#endif

// In case we are using compressed oops, we need to be careful.
// If the object being pushed is an object array, then its length
// field keeps track of the "grey boundary" at which the next
// incremental scan will be done (see ParGCArrayScanChunk).
// When using compressed oops, this length field is kept in the
// lower 32 bits of the erstwhile klass word and cannot be used
// for the overflow chaining pointer (OCP below).  As such the OCP
// would itself need to be compressed into the top 32-bits in this
// case.  Unfortunately, see below, in the event that we have a
// promotion failure, the node to be pushed on the list can be
// outside of the Java heap, so the heap-based pointer compression
// would not work (we would have potential aliasing between C-heap
// and Java-heap pointers).  For this reason, when using compressed
// oops, we simply use a worker-thread-local, non-shared overflow
// list in the form of a growable array, with a slightly different
// overflow stack draining strategy.  If/when we start using fat
// stacks here, we can go back to using (fat) pointer chains
// (although some performance comparisons would be useful since
// single global lists have their own performance disadvantages
// as we were made painfully aware not long ago, see 6786503).
#define BUSY (cast_to_oop<intptr_t>(0x1aff1aff))
void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) {
  assert(is_in_reserved(from_space_obj), "Should be from this generation");
  if (ParGCUseLocalOverflow) {
    // In the case of compressed oops, we use a private, not-shared
    // overflow stack.
    par_scan_state->push_on_overflow_stack(from_space_obj);
  } else {
    assert(!UseCompressedOops, "Error");
    // if the object has been forwarded to itself, then we cannot
    // use the klass pointer for the linked list.  Instead we have
    // to allocate an oopDesc in the C-Heap and use that for the linked list.
    // XXX This is horribly inefficient when a promotion failure occurs
    // and should be fixed. XXX FIX ME !!!
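    // The global list behaves like a lock-free Treiber stack threaded
    // through the klass word: the CAS loop below publishes the new head,
    // retrying while a taker has marked the head BUSY (see
    // take_from_overflow_list_work()).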
#ifndef PRODUCT
    Atomic::inc(&_num_par_pushes);
    assert(_num_par_pushes > 0, "Tautology");
#endif
    if (from_space_obj->forwardee() == from_space_obj) {
      oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1, mtGC);
      listhead->forward_to(from_space_obj);
      from_space_obj = listhead;
    }
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list;
    do {
      cur_overflow_list = observed_overflow_list;
      if (cur_overflow_list != BUSY) {
        from_space_obj->set_klass_to_list_ptr(cur_overflow_list);
      } else {
        from_space_obj->set_klass_to_list_ptr(NULL);
      }
      observed_overflow_list =
        Atomic::cmpxchg((oopDesc*)from_space_obj, &_overflow_list, (oopDesc*)cur_overflow_list);
    } while (cur_overflow_list != observed_overflow_list);
  }
}

bool ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) {
  bool res;

  if (ParGCUseLocalOverflow) {
    res = par_scan_state->take_from_overflow_stack();
  } else {
    assert(!UseCompressedOops, "Error");
    res = take_from_overflow_list_work(par_scan_state);
  }
  return res;
}


// *NOTE*: The overflow list manipulation code here and
// in CMSCollector:: are very similar in shape,
// except that in the CMS case we thread the objects
// directly into the list via their mark word, and do
// not need to deal with special cases below related
// to chunking of object arrays and promotion failure
// handling.
// CR 6797058 has been filed to attempt consolidation of
// the common code.
// Because of the common code, if you make any changes in
// the code below, please check the CMS version to see if
// similar changes might be needed.
// See CMSCollector::par_take_from_overflow_list() for
// more extensive documentation comments.
bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan_state) {
  ObjToScanQueue* work_q = par_scan_state->work_queue();
  // How many to take?
  size_t objsFromOverflow = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
                                 (size_t)ParGCDesiredObjsFromOverflowList);

  assert(!UseCompressedOops, "Error");
  assert(par_scan_state->overflow_stack() == NULL, "Error");
  if (_overflow_list == NULL) return false;

  // Otherwise, there was something there; try claiming the list.
  oop prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
  // Trim off a prefix of at most objsFromOverflow items
  Thread* tid = Thread::current();
  size_t spin_count = ParallelGCThreads;
  size_t sleep_time_millis = MAX2((size_t)1, objsFromOverflow/100);
  for (size_t spin = 0; prefix == BUSY && spin < spin_count; spin++) {
    // someone grabbed it before we did ...
    // ... we spin for a short while...
    os::sleep(tid, sleep_time_millis, false);
    if (_overflow_list == NULL) {
      // nothing left to take
      return false;
    } else if (_overflow_list != BUSY) {
      // try and grab the prefix
      prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
    }
  }
  if (prefix == NULL || prefix == BUSY) {
    // Nothing to take or waited long enough
    if (prefix == NULL) {
      // Write back the NULL in case we overwrote it with BUSY above
      // and it is still the same value.
      (void) Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
    }
    return false;
  }
  assert(prefix != NULL && prefix != BUSY, "Error");
  oop cur = prefix;
  for (size_t i = 1; i < objsFromOverflow; ++i) {
    oop next = cur->list_ptr_from_klass();
    if (next == NULL) break;
    cur = next;
  }
  assert(cur != NULL, "Loop postcondition");

  // Reattach remaining (suffix) to overflow list
  oop suffix = cur->list_ptr_from_klass();
  if (suffix == NULL) {
    // Write back the NULL in lieu of the BUSY we wrote
    // above, if it is still the same value.
    if (_overflow_list == BUSY) {
      (void) Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
    }
  } else {
    assert(suffix != BUSY, "Error");
    // suffix will be put back on global list
    cur->set_klass_to_list_ptr(NULL);     // break off suffix
    // It's possible that the list is still in the empty(busy) state
    // we left it in a short while ago; in that case we may be
    // able to place back the suffix.
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list = observed_overflow_list;
    bool attached = false;
    while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
      observed_overflow_list =
        Atomic::cmpxchg((oopDesc*)suffix, &_overflow_list, (oopDesc*)cur_overflow_list);
      if (cur_overflow_list == observed_overflow_list) {
        attached = true;
        break;
      } else cur_overflow_list = observed_overflow_list;
    }
    if (!attached) {
      // Too bad, someone else got in in between; we'll need to do a splice.
      // Find the last item of suffix list
      oop last = suffix;
      while (true) {
        oop next = last->list_ptr_from_klass();
        if (next == NULL) break;
        last = next;
      }
      // Atomically prepend suffix to current overflow list
      observed_overflow_list = _overflow_list;
      do {
        cur_overflow_list = observed_overflow_list;
        if (cur_overflow_list != BUSY) {
          // Do the splice ...
          last->set_klass_to_list_ptr(cur_overflow_list);
        } else { // cur_overflow_list == BUSY
          last->set_klass_to_list_ptr(NULL);
        }
        observed_overflow_list =
          Atomic::cmpxchg((oopDesc*)suffix, &_overflow_list, (oopDesc*)cur_overflow_list);
      } while (cur_overflow_list != observed_overflow_list);
    }
  }

  // Push objects on prefix list onto this thread's work queue
  assert(prefix != NULL && prefix != BUSY, "program logic");
  cur = prefix;
  ssize_t n = 0;
  while (cur != NULL) {
    oop obj_to_push = cur->forwardee();
    oop next = cur->list_ptr_from_klass();
    cur->set_klass(obj_to_push->klass());
    // This may be an array object that is self-forwarded. In that case, the list pointer
    // space, cur, is not in the Java heap, but rather in the C-heap and should be freed.
    if (!is_in_reserved(cur)) {
      // This can become a scaling bottleneck when there is work queue overflow coincident
      // with promotion failure.
      oopDesc* f = cur;
      FREE_C_HEAP_ARRAY(oopDesc, f);
    } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = work_q->push(obj_to_push);
    assert(ok, "Should have succeeded");
    cur = next;
    n++;
  }
  TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n));
#ifndef PRODUCT
  assert(_num_par_pushes >= n, "Too many pops?");
  Atomic::sub(n, &_num_par_pushes);
#endif
  return true;
}
#undef BUSY

void ParNewGeneration::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _span_based_discoverer.set_span(_reserved);
    _ref_processor =
      new ReferenceProcessor(&_span_based_discoverer,    // span
                             ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
                             ParallelGCThreads,          // mt processing degree
                             refs_discovery_is_mt(),     // mt discovery
                             ParallelGCThreads,          // mt discovery degree
                             refs_discovery_is_atomic(), // atomic_discovery
                             NULL);                      // is_alive_non_header
  }
}

const char* ParNewGeneration::name() const {
  return "par new generation";
}

void ParNewGeneration::restore_preserved_marks() {
  SharedRestorePreservedMarksTaskExecutor task_executor(CMSHeap::heap()->workers());
  _preserved_marks_set.restore(&task_executor);
}