src/share/vm/memory/genCollectedHeap.cpp

rev 7211 : [mq]: remove_ngen
rev 7212 : [mq]: remove_get_gen
rev 7213 : imported patch move_genspecs
rev 7214 : imported patch remove_n_gen
rev 7215 : imported patch remove_levels

--- Old version (before the patches above) ---

 101 
 102   size_t heap_alignment = collector_policy()->heap_alignment();
 103 
 104   heap_address = allocate(heap_alignment, &total_reserved,
 105                           &n_covered_regions, &heap_rs);
 106 
 107   if (!heap_rs.is_reserved()) {
 108     vm_shutdown_during_initialization(
 109       "Could not reserve enough space for object heap");
 110     return JNI_ENOMEM;
 111   }
 112 
 113   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
 114 
 115   _rem_set = collector_policy()->create_rem_set(reserved_region(), n_covered_regions);
 116   set_barrier_set(rem_set()->bs());
 117 
 118   _gch = this;
 119 
 120   ReservedSpace young_rs = heap_rs.first_part(gen_policy()->young_gen_spec()->max_size(), false, false);
 121   _young_gen = gen_policy()->young_gen_spec()->init(young_rs, 0, rem_set());
 122   heap_rs = heap_rs.last_part(gen_policy()->young_gen_spec()->max_size());
 123 
 124   ReservedSpace old_rs = heap_rs.first_part(gen_policy()->old_gen_spec()->max_size(), false, false);
 125   _old_gen = gen_policy()->old_gen_spec()->init(old_rs, 1, rem_set());
 126   heap_rs = heap_rs.last_part(gen_policy()->old_gen_spec()->max_size());
 127 
 128   clear_incremental_collection_failed();
 129 
 130 #if INCLUDE_ALL_GCS
 131   // If we are running CMS, create the collector responsible
 132   // for collecting the CMS generations.
 133   if (collector_policy()->is_concurrent_mark_sweep_policy()) {
 134     bool success = create_cms_collector();
 135     if (!success) return JNI_ENOMEM;
 136   }
 137 #endif // INCLUDE_ALL_GCS
 138 
 139   return JNI_OK;
 140 }
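
The carve-up above hands the low part of a single contiguous reservation to the
young generation and the remainder to the old generation, which is what later
lets is_in_young() test youngness with one pointer comparison. A minimal
standalone sketch of the first_part/last_part splitting, using hypothetical
simplified types rather than HotSpot's ReservedSpace:

#include <cstddef>

// Simplified model of a reserved address range (hypothetical; the real
// ReservedSpace also tracks alignment, page size, and special pages).
struct Range {
  char*  base;
  size_t size;
  Range first_part(size_t n) const { return Range{base, n}; }            // low n bytes
  Range last_part(size_t n)  const { return Range{base + n, size - n}; } // the rest
};

// Mirrors the shape of the initialization code: young gets the low
// addresses, old gets what is left.
void split_heap(Range heap, size_t young_max, size_t old_max) {
  Range young_rs = heap.first_part(young_max);
  heap = heap.last_part(young_max);
  Range old_rs = heap.first_part(old_max);
  heap = heap.last_part(old_max);
  (void)young_rs; (void)old_rs;  // handed to the generation constructors
}
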
 141 
 142 char* GenCollectedHeap::allocate(size_t alignment,
 143                                  size_t* _total_reserved,
 144                                  int* _n_covered_regions,
 145                                  ReservedSpace* heap_rs){


 189   policy->initialize_size_policy(def_new_gen->eden()->capacity(),
 190                                  _old_gen->capacity(),
 191                                  def_new_gen->from()->capacity());
 192   policy->initialize_gc_policy_counters();
 193 }
 194 
 195 void GenCollectedHeap::ref_processing_init() {
 196   SharedHeap::ref_processing_init();
 197   _young_gen->ref_processor_init();
 198   _old_gen->ref_processor_init();
 199 }
 200 
 201 size_t GenCollectedHeap::capacity() const {
 202   return _young_gen->capacity() + _old_gen->capacity();
 203 }
 204 
 205 size_t GenCollectedHeap::used() const {
 206   return _young_gen->used() + _old_gen->used();
 207 }
 208 
 209 // Save the "used_region" for the generation at the given level and all younger generations.
 210 void GenCollectedHeap::save_used_regions(int level) {
 211   assert(level < _gen_policy->number_of_generations(), "Illegal level parameter");
 212   if (level == 1) {
 213     _old_gen->save_used_region();
 214   }
 215   _young_gen->save_used_region();
 216 }
 217 
 218 size_t GenCollectedHeap::max_capacity() const {
 219   return _young_gen->max_capacity() + _old_gen->max_capacity();
 220 }
 221 
 222 // Update the _full_collections_completed counter
 223 // at the end of a stop-world full GC.
 224 unsigned int GenCollectedHeap::update_full_collections_completed() {
 225   MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
 226   assert(_full_collections_completed <= _total_full_collections,
 227          "Can't complete more collections than were started");
 228   _full_collections_completed = _total_full_collections;
 229   ml.notify_all();
 230   return _full_collections_completed;
 231 }
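
The pattern here (advance the completed counter to the started counter under a
lock, then wake every waiter) can be modeled with standard C++ primitives. A
sketch, assuming nothing about HotSpot's MonitorLockerEx beyond what the code
above shows:

#include <condition_variable>
#include <mutex>

static std::mutex              full_gc_mutex;
static std::condition_variable full_gc_cv;
static unsigned int collections_started   = 0;
static unsigned int collections_completed = 0;

// Catch the completed counter up to the started counter and notify
// threads waiting for a full collection cycle to finish.
unsigned int update_completed() {
  std::lock_guard<std::mutex> lock(full_gc_mutex);
  collections_completed = collections_started;
  full_gc_cv.notify_all();
  return collections_completed;
}

// A waiter blocks until at least 'count' collections have completed.
void wait_for_completed(unsigned int count) {
  std::unique_lock<std::mutex> lock(full_gc_mutex);
  full_gc_cv.wait(lock, [&] { return collections_completed >= count; });
}
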
 232 
 233 // Update the _full_collections_completed counter, as appropriate,
 234 // at the end of a concurrent GC cycle. Note the conditional update


 316 void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
 317                                           bool is_tlab, bool run_verification, bool clear_soft_refs) {
 318   // Timer for individual generations. Last argument is false: no CR
 319   // FIXME: We should try to start the timing earlier to cover more of the GC pause
 320   // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
 321   // so we can assume here that the next GC id is what we want.
 322   GCTraceTime t1(gen->short_name(), PrintGCDetails, false, NULL, GCId::peek());
 323   TraceCollectorStats tcs(gen->counters());
 324   TraceMemoryManagerStats tmms(gen->kind(), gc_cause());
 325 
 326   size_t prev_used = gen->used();
 327   gen->stat_record()->invocations++;
 328   gen->stat_record()->accumulated_time.start();
 329 
 330   // Must be done anew before each collection because
 331   // a previous collection will do mangling and will
 332   // change top of some spaces.
 333   record_gen_tops_before_GC();
 334 
 335   if (PrintGC && Verbose) {








 336     gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
 337                         gen->level(),
 338                         gen->stat_record()->invocations,
 339                         size * HeapWordSize);
 340   }
 341 
 342   if (run_verification && VerifyBeforeGC) {
 343     HandleMark hm;  // Discard invalid handles created during verification
 344     Universe::verify(" VerifyBeforeGC:");
 345   }
 346   COMPILER2_PRESENT(DerivedPointerTable::clear());
 347 
 348   // Do collection work
 349   {
 350     // Note on ref discovery: For what appear to be historical reasons,
 351     // GCH enables and disables (by enqueueing) refs discovery.
 352     // In the future this should be moved into the generation's
 353     // collect method so that ref discovery and enqueueing concerns
 354     // are local to a generation. The collect method could return
 355     // an appropriate indication in the case that notification on
 356     // the ref lock was needed. This will make the treatment of
 357     // weak refs more uniform (and indeed remove such concerns


 379       rp->enqueue_discovered_references();
 380     } else {
 381       rp->set_enqueuing_is_done(false);
 382     }
 383     rp->verify_no_references_recorded();
 384   }
 385 
 386   // Determine if allocation request was met.
 387   if (size > 0) {
 388     if (!is_tlab || gen->supports_tlab_allocation()) {
 389       if (size * HeapWordSize <= gen->unsafe_max_alloc_nogc()) {
 390         size = 0;
 391       }
 392     }
 393   }
 394 
 395   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
 396 
 397   gen->stat_record()->accumulated_time.stop();
 398 
 399   update_gc_stats(gen->level(), full);
 400 
 401   if (run_verification && VerifyAfterGC) {
 402     HandleMark hm;  // Discard invalid handles created during verification
 403     Universe::verify(" VerifyAfterGC:");
 404   }
 405 
 406   if (PrintGCDetails) {
 407     gclog_or_tty->print(":");
 408     gen->print_heap_change(prev_used);
 409   }
 410 }
 411 
 412 void GenCollectedHeap::do_collection(bool   full,
 413                                      bool   clear_all_soft_refs,
 414                                      size_t size,
 415                                      bool   is_tlab,
 416                                      int    max_level) {
 417   ResourceMark rm;
 418   DEBUG_ONLY(Thread* my_thread = Thread::current();)
 419 
 420   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
 421   assert(my_thread->is_VM_thread() ||
 422          my_thread->is_ConcurrentGC_thread(),
 423          "incorrect thread type capability");
 424   assert(Heap_lock->is_locked(),
 425          "the requesting thread should have the Heap_lock");
 426   guarantee(!is_gc_active(), "collection is not reentrant");
 427   assert(max_level < n_gens(), "sanity check");
 428 
 429   if (GC_locker::check_active_before_gc()) {
 430     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
 431   }
 432 
 433   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
 434                           collector_policy()->should_clear_all_soft_refs();
 435 
 436   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
 437 
 438   const size_t metadata_prev_used = MetaspaceAux::used_bytes();
 439 
 440   print_heap_before_gc();
 441 
 442   {
 443     FlagSetting fl(_is_gc_active, true);
 444 
 445     bool complete = full && (max_level == (n_gens()-1));
 446     const char* gc_cause_prefix = complete ? "Full GC" : "GC";
 447     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
 448     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
 449     // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
 450     // so we can assume here that the next GC id is what we want.
 451     GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL, GCId::peek());
 452 
 453     gc_prologue(complete);
 454     increment_total_collections(complete);
 455 
 456     size_t gch_prev_used = used();
 457     bool must_restore_marks_for_biased_locking = false;

 458     bool run_verification = total_collections() >= VerifyGCStartAt;
 459 
 460     if (_young_gen->performs_in_place_marking() ||
 461         _old_gen->performs_in_place_marking()) {
 462       // We want to avoid doing this for
 463       // scavenge-only collections where it's unnecessary.
 464       must_restore_marks_for_biased_locking = true;
 465       BiasedLocking::preserve_marks();
 466     }
 467 
 468     bool prepared_for_verification = false;
 469     int max_level_collected = 0;
 470     if (!(full && _old_gen->full_collects_younger_generations()) &&
 471         _young_gen->should_collect(full, size, is_tlab)) {
 472       if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
 473         prepare_for_verify();
 474         prepared_for_verification = true;
 475       }
 476       collect_generation(_young_gen, full, size, is_tlab, run_verification && VerifyGCLevel <= 0, do_clear_all_soft_refs);
 477     }
 478     if (max_level == 1 && _old_gen->should_collect(full, size, is_tlab)) {
 479       if (!complete) {
 480         // The full_collections increment was missed above.
 481         increment_total_full_collections();
 482       }
 483       pre_full_gc_dump(NULL);    // do any pre full gc dumps
 484       if (run_verification && VerifyGCLevel <= 1 && VerifyBeforeGC) {
 485         if (!prepared_for_verification) {
 486           prepare_for_verify();
 487         }
 488       }
 489       collect_generation(_old_gen, full, size, is_tlab, run_verification && VerifyGCLevel <= 1, do_clear_all_soft_refs);
 490       max_level_collected = 1;
 491     }
 492 
 493     // Update "complete" boolean wrt what actually transpired --
 494     // for instance, a promotion failure could have led to
 495     // a whole heap collection.
 496     complete = complete || (max_level_collected == n_gens() - 1);
 497 
 498     if (complete) { // We did a "major" collection
 499       // FIXME: See comment at pre_full_gc_dump call
 500       post_full_gc_dump(NULL);   // do any post full gc dumps
 501     }
 502 
 503     if (PrintGCDetails) {
 504       print_heap_change(gch_prev_used);
 505 
 506       // Print metaspace info for full GC with PrintGCDetails flag.
 507       if (complete) {
 508         MetaspaceAux::print_metaspace_change(metadata_prev_used);
 509       }
 510     }
 511 
 512     // Adjust generation sizes.
 513     if (max_level_collected == 1) {
 514       _old_gen->compute_new_size();
 515     }
 516     _young_gen->compute_new_size();
 517 
 518     if (complete) {
 519       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
 520       ClassLoaderDataGraph::purge();
 521       MetaspaceAux::verify_metrics();
 522       // Resize the metaspace capacity after full collections
 523       MetaspaceGC::compute_new_size();
 524       update_full_collections_completed();
 525     }
 526 
 527     // Track memory usage and detect low memory after GC finishes
 528     MemoryService::track_memory_usage();
 529 
 530     gc_epilogue(complete);
 531 
 532     if (must_restore_marks_for_biased_locking) {
 533       BiasedLocking::restore_marks();
 534     }
 535   }
 536 
 537   print_heap_after_gc();
 538 
 539 #ifdef TRACESPINNING
 540   ParallelTaskTerminator::print_termination_counts();
 541 #endif
 542 }
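
do_collection() relies on RAII guards: FlagSetting sets _is_gc_active for the
scope and restores it on every exit path, and ClearedAllSoftRefs plays a
similar role for the soft-ref policy. A minimal sketch of the scoped-flag
idiom (a simplified stand-in, not HotSpot's FlagSetting):

// Scoped flag: set on construction, restored to the saved value on
// destruction, so early returns cannot leak the "GC active" state.
class ScopedFlag {
  bool& _flag;
  bool  _saved;
 public:
  ScopedFlag(bool& flag, bool value) : _flag(flag), _saved(flag) { _flag = value; }
  ~ScopedFlag() { _flag = _saved; }
};

static bool is_gc_active_flag = false;

void do_collection_sketch() {
  ScopedFlag fl(is_gc_active_flag, true);  // like FlagSetting fl(_is_gc_active, true)
  // ... collection work; the flag reverts when fl goes out of scope ...
}
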
 543 
 544 HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
 545   return collector_policy()->satisfy_failed_allocation(size, is_tlab);
 546 }
 547 
 548 void GenCollectedHeap::set_par_threads(uint t) {
 549   SharedHeap::set_par_threads(t);
 550   _gen_process_roots_tasks->set_n_threads(t);
 551 }
 552 
 553 void GenCollectedHeap::
 554 gen_process_roots(int level,
 555                   bool younger_gens_as_roots,
 556                   bool activate_scope,
 557                   SharedHeap::ScanningOption so,
 558                   OopsInGenClosure* not_older_gens,
 559                   OopsInGenClosure* weak_roots,
 560                   OopsInGenClosure* older_gens,
 561                   CLDClosure* cld_closure,
 562                   CLDClosure* weak_cld_closure,
 563                   CodeBlobClosure* code_closure) {
 564 
 565   // General roots.
 566   SharedHeap::process_roots(activate_scope, so,
 567                             not_older_gens, weak_roots,
 568                             cld_closure, weak_cld_closure,
 569                             code_closure);
 570 
 571   if (younger_gens_as_roots) {
 572     if (!_gen_process_roots_tasks->is_task_claimed(GCH_PS_younger_gens)) {
 573       if (level == 1) {
 574         not_older_gens->set_generation(_young_gen);
 575         _young_gen->oop_iterate(not_older_gens);
 576       }
 577       not_older_gens->reset_generation();
 578     }
 579   }
 580   // When collection is parallel, all threads get to cooperate to do
 581   // older-gen scanning.
 582   if (level == 0) {
 583     older_gens->set_generation(_old_gen);
 584     rem_set()->younger_refs_iterate(_old_gen, older_gens);
 585     older_gens->reset_generation();
 586   }
 587 
 588   _gen_process_roots_tasks->all_tasks_completed();
 589 }
 590 
 591 void GenCollectedHeap::
 592 gen_process_roots(int level,
 593                   bool younger_gens_as_roots,
 594                   bool activate_scope,
 595                   SharedHeap::ScanningOption so,
 596                   bool only_strong_roots,
 597                   OopsInGenClosure* not_older_gens,
 598                   OopsInGenClosure* older_gens,
 599                   CLDClosure* cld_closure) {
 600 
 601   const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots;
 602 
 603   bool is_moving_collection = false;
 604   if (level == 0 || is_adjust_phase) {
 605     // young collections are always moving
 606     is_moving_collection = true;
 607   }
 608 
 609   MarkingCodeBlobClosure mark_code_closure(not_older_gens, is_moving_collection);
 610   CodeBlobClosure* code_closure = &mark_code_closure;
 611 
 612   gen_process_roots(level,
 613                     younger_gens_as_roots,
 614                     activate_scope, so,
 615                     not_older_gens, only_strong_roots ? NULL : not_older_gens,
 616                     older_gens,
 617                     cld_closure, only_strong_roots ? NULL : cld_closure,
 618                     code_closure);
 619 
 620 }
 621 
 622 void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
 623   SharedHeap::process_weak_roots(root_closure);
 624   // "Local" "weak" refs
 625   _young_gen->ref_processor()->weak_oops_do(root_closure);
 626   _old_gen->ref_processor()->weak_oops_do(root_closure);
 627 }
 628 
 629 #define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
 630 void GenCollectedHeap::                                                 \
 631 oop_since_save_marks_iterate(int level,                                 \
 632                              OopClosureType* cur,                       \
 633                              OopClosureType* older) {                   \
 634   if (level == 0) {                                                     \
 635     _young_gen->oop_since_save_marks_iterate##nv_suffix(cur);           \
 636     _old_gen->oop_since_save_marks_iterate##nv_suffix(older);           \
 637   } else {                                                              \
 638     _old_gen->oop_since_save_marks_iterate##nv_suffix(cur);             \
 639   }                                                                     \
 640 }
 641 
 642 ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)
 643 
 644 #undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN
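
ALL_SINCE_SAVE_MARKS_CLOSURES is an X-macro list: it applies the definition
macro once per closure type, stamping out one overload per type so each call
can bind the non-virtual (##nv_suffix) iterator. A toy version of the
technique, with hypothetical closure names:

// The list macro invokes 'f' once per (closure type, suffix) pair.
#define ALL_TOY_CLOSURES(f) \
  f(ScanClosure, _nv)       \
  f(FilteringClosure, _nv)

struct ScanClosure      { void apply_nv(int*) {} };
struct FilteringClosure { void apply_nv(int*) {} };

// Like GCH_SINCE_SAVE_MARKS_ITERATE_DEFN above: the suffix selects the
// non-virtual callee, and overloading selects the right definition.
#define TOY_ITERATE_DEFN(OopClosureType, nv_suffix) \
  void toy_iterate(OopClosureType* cl, int* p) {    \
    cl->apply##nv_suffix(p);                        \
  }

ALL_TOY_CLOSURES(TOY_ITERATE_DEFN)
#undef TOY_ITERATE_DEFN
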
 645 
 646 bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
 647   if (level == 0) {
 648     if (!_young_gen->no_allocs_since_save_marks()) return false;
 649   }
 650   if (!_old_gen->no_allocs_since_save_marks()) return false;
 651   return true;
 652 }
 653 
 654 bool GenCollectedHeap::supports_inline_contig_alloc() const {
 655   return _young_gen->supports_inline_contig_alloc();
 656 }
 657 
 658 HeapWord** GenCollectedHeap::top_addr() const {
 659   return _young_gen->top_addr();
 660 }
 661 
 662 HeapWord** GenCollectedHeap::end_addr() const {
 663   return _young_gen->end_addr();
 664 }
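
top_addr() and end_addr() exist to support inline contiguous allocation: the
JIT emits a bump-the-pointer fast path directly against the young generation's
top and end words. A sketch of that fast path with std::atomic (illustrative
only; the real fast path is generated code, not C++):

#include <atomic>
#include <cstddef>

typedef unsigned long HeapWordValue;  // stand-in for HeapWord

static std::atomic<HeapWordValue*> top_ptr;  // current allocation cursor
static HeapWordValue*              end_ptr;  // end of the contiguous space

HeapWordValue* inline_alloc_sketch(size_t word_size) {
  HeapWordValue* old_top = top_ptr.load();
  do {
    if (old_top + word_size > end_ptr) {
      return NULL;  // fast path fails; fall back to the slow allocation path
    }
    // Try to advance the cursor; on failure old_top is reloaded and we retry.
  } while (!top_ptr.compare_exchange_weak(old_top, old_top + word_size));
  return old_top;
}
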
 665 
 666 // public collection interfaces
 667 
 668 void GenCollectedHeap::collect(GCCause::Cause cause) {
 669   if (should_do_concurrent_full_gc(cause)) {
 670 #if INCLUDE_ALL_GCS
 671     // mostly concurrent full collection
 672     collect_mostly_concurrent(cause);
 673 #else  // INCLUDE_ALL_GCS
 674     ShouldNotReachHere();
 675 #endif // INCLUDE_ALL_GCS
 676   } else if (cause == GCCause::_wb_young_gc) {
 677     // minor collection for WhiteBox API
 678     collect(cause, 0);
 679   } else {
 680 #ifdef ASSERT
 681   if (cause == GCCause::_scavenge_alot) {
 682     // minor collection only
 683     collect(cause, 0);
 684   } else {
 685     // Stop-the-world full collection
 686     collect(cause, n_gens() - 1);
 687   }
 688 #else
 689     // Stop-the-world full collection
 690     collect(cause, n_gens() - 1);
 691 #endif
 692   }
 693 }
 694 
 695 void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
 696   // The caller doesn't have the Heap_lock
 697   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
 698   MutexLocker ml(Heap_lock);
 699   collect_locked(cause, max_level);
 700 }
 701 
 702 void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
 703   // The caller has the Heap_lock
 704   assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
 705   collect_locked(cause, n_gens() - 1);
 706 }
 707 
 708 // this is the private collection interface
 709 // The Heap_lock is expected to be held on entry.
 710 
 711 void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
 712   // Read the GC count while holding the Heap_lock
 713   unsigned int gc_count_before      = total_collections();
 714   unsigned int full_gc_count_before = total_full_collections();
 715   {
 716     MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
 717     VM_GenCollectFull op(gc_count_before, full_gc_count_before,
 718                          cause, max_level);
 719     VMThread::execute(&op);
 720   }
 721 }
 722 
 723 #if INCLUDE_ALL_GCS
 724 bool GenCollectedHeap::create_cms_collector() {
 725 
 726   assert(_old_gen->kind() == Generation::ConcurrentMarkSweep,
 727          "Unexpected generation kinds");
 728   // Skip two header words in the block content verification
 729   NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
 730   CMSCollector* collector = new CMSCollector(
 731     (ConcurrentMarkSweepGeneration*)_old_gen,
 732     _rem_set->as_CardTableRS(),
 733     (ConcurrentMarkSweepPolicy*) collector_policy());
 734 
 735   if (collector == NULL || !collector->completed_initialization()) {
 736     if (collector) {
 737       delete collector;  // Be nice in embedded situation
 738     }


 741   }
 742   return true;  // success
 743 }
 744 
 745 void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
 746   assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");
 747 
 748   MutexLocker ml(Heap_lock);
 749   // Read the GC counts while holding the Heap_lock
 750   unsigned int full_gc_count_before = total_full_collections();
 751   unsigned int gc_count_before      = total_collections();
 752   {
 753     MutexUnlocker mu(Heap_lock);
 754     VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
 755     VMThread::execute(&op);
 756   }
 757 }
 758 #endif // INCLUDE_ALL_GCS
 759 
 760 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
 761    do_full_collection(clear_all_soft_refs, _gen_policy->number_of_generations() - 1);
 762 }
 763 
 764 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
 765                                           int max_level) {
 766   int local_max_level;
 767   if (!incremental_collection_will_fail(false /* don't consult_young */) &&
 768       gc_cause() == GCCause::_gc_locker) {
 769     local_max_level = 0;
 770   } else {
 771     local_max_level = max_level;
 772   }
 773 
 774   do_collection(true                 /* full */,
 775                 clear_all_soft_refs  /* clear_all_soft_refs */,
 776                 0                    /* size */,
 777                 false                /* is_tlab */,
 778                 local_max_level      /* max_level */);
 779   // Hack XXX FIX ME !!!
 780   // A scavenge may not have been attempted, or may have
 781   // been attempted and failed, because the old gen was too full
 782   if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
 783       incremental_collection_will_fail(false /* don't consult_young */)) {
 784     if (PrintGCDetails) {
 785       gclog_or_tty->print_cr("GC locker: Trying a full collection "
 786                              "because scavenge failed");
 787     }
 788     // This time allow the old gen to be collected as well
 789     do_collection(true                 /* full */,
 790                   clear_all_soft_refs  /* clear_all_soft_refs */,
 791                   0                    /* size */,
 792                   false                /* is_tlab */,
 793                   n_gens() - 1         /* max_level */);
 794   }
 795 }
 796 
 797 bool GenCollectedHeap::is_in_young(oop p) {
 798   bool result = ((HeapWord*)p) < _old_gen->reserved().start();
 799   assert(result == _young_gen->is_in_reserved(p),
 800          err_msg("incorrect test - result=%d, p=" INTPTR_FORMAT, result, p2i((void*)p)));
 801   return result;
 802 }
 803 
 804 // Returns "TRUE" iff "p" points into the committed areas of the heap.
 805 bool GenCollectedHeap::is_in(const void* p) const {
 806   #ifndef ASSERT
 807   guarantee(VerifyBeforeGC      ||
 808             VerifyDuringGC      ||
 809             VerifyBeforeExit    ||
 810             VerifyDuringStartup ||
 811             PrintAssembly       ||
 812             tty->count() != 0   ||   // already printing
 813             VerifyAfterGC       ||


1181   }
1182 }
1183 #endif  // not PRODUCT
1184 
1185 class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
1186  public:
1187   void do_generation(Generation* gen) {
1188     gen->ensure_parsability();
1189   }
1190 };
1191 
1192 void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
1193   CollectedHeap::ensure_parsability(retire_tlabs);
1194   GenEnsureParsabilityClosure ep_cl;
1195   generation_iterate(&ep_cl, false);
1196 }
1197 
1198 oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen,
1199                                               oop obj,
1200                                               size_t obj_size) {
1201   guarantee(old_gen->level() == 1, "We only get here with an old generation");
1202   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1203   HeapWord* result = NULL;
1204 
1205   result = old_gen->expand_and_allocate(obj_size, false);
1206 
1207   if (result != NULL) {
1208     Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
1209   }
1210   return oop(result);
1211 }
1212 
1213 class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
1214   jlong _time;   // in ms
1215   jlong _now;    // in ms
1216 
1217  public:
1218   GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { }
1219 
1220   jlong time() { return _time; }
1221 


--- New version (after the patches above) ---


 101 
 102   size_t heap_alignment = collector_policy()->heap_alignment();
 103 
 104   heap_address = allocate(heap_alignment, &total_reserved,
 105                           &n_covered_regions, &heap_rs);
 106 
 107   if (!heap_rs.is_reserved()) {
 108     vm_shutdown_during_initialization(
 109       "Could not reserve enough space for object heap");
 110     return JNI_ENOMEM;
 111   }
 112 
 113   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
 114 
 115   _rem_set = collector_policy()->create_rem_set(reserved_region(), n_covered_regions);
 116   set_barrier_set(rem_set()->bs());
 117 
 118   _gch = this;
 119 
 120   ReservedSpace young_rs = heap_rs.first_part(gen_policy()->young_gen_spec()->max_size(), false, false);
 121   _young_gen = gen_policy()->young_gen_spec()->init(young_rs, rem_set());
 122   heap_rs = heap_rs.last_part(gen_policy()->young_gen_spec()->max_size());
 123 
 124   ReservedSpace old_rs = heap_rs.first_part(gen_policy()->old_gen_spec()->max_size(), false, false);
 125   _old_gen = gen_policy()->old_gen_spec()->init(old_rs, rem_set());
 126   heap_rs = heap_rs.last_part(gen_policy()->old_gen_spec()->max_size());
 127 
 128   clear_incremental_collection_failed();
 129 
 130 #if INCLUDE_ALL_GCS
 131   // If we are running CMS, create the collector responsible
 132   // for collecting the CMS generations.
 133   if (collector_policy()->is_concurrent_mark_sweep_policy()) {
 134     bool success = create_cms_collector();
 135     if (!success) return JNI_ENOMEM;
 136   }
 137 #endif // INCLUDE_ALL_GCS
 138 
 139   return JNI_OK;
 140 }
 141 
 142 char* GenCollectedHeap::allocate(size_t alignment,
 143                                  size_t* _total_reserved,
 144                                  int* _n_covered_regions,
 145                                  ReservedSpace* heap_rs){


 189   policy->initialize_size_policy(def_new_gen->eden()->capacity(),
 190                                  _old_gen->capacity(),
 191                                  def_new_gen->from()->capacity());
 192   policy->initialize_gc_policy_counters();
 193 }
 194 
 195 void GenCollectedHeap::ref_processing_init() {
 196   SharedHeap::ref_processing_init();
 197   _young_gen->ref_processor_init();
 198   _old_gen->ref_processor_init();
 199 }
 200 
 201 size_t GenCollectedHeap::capacity() const {
 202   return _young_gen->capacity() + _old_gen->capacity();
 203 }
 204 
 205 size_t GenCollectedHeap::used() const {
 206   return _young_gen->used() + _old_gen->used();
 207 }
 208 
 209 void GenCollectedHeap::save_used_regions() {



 210   _old_gen->save_used_region();

 211   _young_gen->save_used_region();
 212 }
 213 
 214 size_t GenCollectedHeap::max_capacity() const {
 215   return _young_gen->max_capacity() + _old_gen->max_capacity();
 216 }
 217 
 218 // Update the _full_collections_completed counter
 219 // at the end of a stop-world full GC.
 220 unsigned int GenCollectedHeap::update_full_collections_completed() {
 221   MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
 222   assert(_full_collections_completed <= _total_full_collections,
 223          "Can't complete more collections than were started");
 224   _full_collections_completed = _total_full_collections;
 225   ml.notify_all();
 226   return _full_collections_completed;
 227 }
 228 
 229 // Update the _full_collections_completed counter, as appropriate,
 230 // at the end of a concurrent GC cycle. Note the conditional update


 312 void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
 313                                           bool is_tlab, bool run_verification, bool clear_soft_refs) {
 314   // Timer for individual generations. Last argument is false: no CR
 315   // FIXME: We should try to start the timing earlier to cover more of the GC pause
 316   // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
 317   // so we can assume here that the next GC id is what we want.
 318   GCTraceTime t1(gen->short_name(), PrintGCDetails, false, NULL, GCId::peek());
 319   TraceCollectorStats tcs(gen->counters());
 320   TraceMemoryManagerStats tmms(gen->kind(), gc_cause());
 321 
 322   size_t prev_used = gen->used();
 323   gen->stat_record()->invocations++;
 324   gen->stat_record()->accumulated_time.start();
 325 
 326   // Must be done anew before each collection because
 327   // a previous collection will do mangling and will
 328   // change top of some spaces.
 329   record_gen_tops_before_GC();
 330 
 331   if (PrintGC && Verbose) {
 332     // The logging below is left unchanged by the removal of the level
 333     // concept; it could eventually print young/old instead of 0/1.
 334     int level;
 335     if (gen == GenCollectedHeap::heap()->young_gen()) {
 336       level = 0;
 337     } else {
 338       level = 1;
 339     }
 340     gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
 341                         level,
 342                         gen->stat_record()->invocations,
 343                         size * HeapWordSize);
 344   }
 345 
 346   if (run_verification && VerifyBeforeGC) {
 347     HandleMark hm;  // Discard invalid handles created during verification
 348     Universe::verify(" VerifyBeforeGC:");
 349   }
 350   COMPILER2_PRESENT(DerivedPointerTable::clear());
 351 
 352   // Do collection work
 353   {
 354     // Note on ref discovery: For what appear to be historical reasons,
 355     // GCH enables and disables (by enqueueing) refs discovery.
 356     // In the future this should be moved into the generation's
 357     // collect method so that ref discovery and enqueueing concerns
 358     // are local to a generation. The collect method could return
 359     // an appropriate indication in the case that notification on
 360     // the ref lock was needed. This will make the treatment of
 361     // weak refs more uniform (and indeed remove such concerns


 383       rp->enqueue_discovered_references();
 384     } else {
 385       rp->set_enqueuing_is_done(false);
 386     }
 387     rp->verify_no_references_recorded();
 388   }
 389 
 390   // Determine if allocation request was met.
 391   if (size > 0) {
 392     if (!is_tlab || gen->supports_tlab_allocation()) {
 393       if (size * HeapWordSize <= gen->unsafe_max_alloc_nogc()) {
 394         size = 0;
 395       }
 396     }
 397   }
 398 
 399   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
 400 
 401   gen->stat_record()->accumulated_time.stop();
 402 
 403   update_gc_stats(gen, full);
 404 
 405   if (run_verification && VerifyAfterGC) {
 406     HandleMark hm;  // Discard invalid handles created during verification
 407     Universe::verify(" VerifyAfterGC:");
 408   }
 409 
 410   if (PrintGCDetails) {
 411     gclog_or_tty->print(":");
 412     gen->print_heap_change(prev_used);
 413   }
 414 }
 415 
 416 void GenCollectedHeap::do_collection(bool             full,
 417                                      bool             clear_all_soft_refs,
 418                                      size_t           size,
 419                                      bool             is_tlab,
 420                                      Generation::Type max_generation) {
 421   ResourceMark rm;
 422   DEBUG_ONLY(Thread* my_thread = Thread::current();)
 423 
 424   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
 425   assert(my_thread->is_VM_thread() ||
 426          my_thread->is_ConcurrentGC_thread(),
 427          "incorrect thread type capability");
 428   assert(Heap_lock->is_locked(),
 429          "the requesting thread should have the Heap_lock");
 430   guarantee(!is_gc_active(), "collection is not reentrant");

 431 
 432   if (GC_locker::check_active_before_gc()) {
 433     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
 434   }
 435 
 436   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
 437                           collector_policy()->should_clear_all_soft_refs();
 438 
 439   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
 440 
 441   const size_t metadata_prev_used = MetaspaceAux::used_bytes();
 442 
 443   print_heap_before_gc();
 444 
 445   {
 446     FlagSetting fl(_is_gc_active, true);
 447 
 448     bool complete = full && (max_generation == Generation::Old);
 449     const char* gc_cause_prefix = complete ? "Full GC" : "GC";
 450     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
 451     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
 452     // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
 453     // so we can assume here that the next GC id is what we want.
 454     GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL, GCId::peek());
 455 
 456     gc_prologue(complete);
 457     increment_total_collections(complete);
 458 
 459     size_t gch_prev_used = used();
 460     bool must_restore_marks_for_biased_locking = false;
 461     bool old_collected = false;
 462     bool run_verification = total_collections() >= VerifyGCStartAt;
 463 
 464     if (_young_gen->performs_in_place_marking() ||
 465         _old_gen->performs_in_place_marking()) {
 466       // We want to avoid doing this for
 467       // scavenge-only collections where it's unnecessary.
 468       must_restore_marks_for_biased_locking = true;
 469       BiasedLocking::preserve_marks();
 470     }
 471 
 472     bool prepared_for_verification = false;

 473     if (!(full && _old_gen->full_collects_younger_generations()) &&
 474         _young_gen->should_collect(full, size, is_tlab)) {
 475       if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
 476         prepare_for_verify();
 477         prepared_for_verification = true;
 478       }
 479       collect_generation(_young_gen, full, size, is_tlab, run_verification && VerifyGCLevel <= 0, do_clear_all_soft_refs);
 480     }
 481     if (max_generation == Generation::Old && _old_gen->should_collect(full, size, is_tlab)) {
 482       if (!complete) {
 483         // The full_collections increment was missed above.
 484         increment_total_full_collections();
 485       }
 486       pre_full_gc_dump(NULL);    // do any pre full gc dumps
 487       if (run_verification && VerifyGCLevel <= 1 && VerifyBeforeGC) {
 488         if (!prepared_for_verification) {
 489           prepare_for_verify();
 490         }
 491       }
 492       collect_generation(_old_gen, full, size, is_tlab, run_verification && VerifyGCLevel <= 1, do_clear_all_soft_refs);
 493       old_collected = true;
 494     }
 495 
 496     // Update "complete" boolean wrt what actually transpired --
 497     // for instance, a promotion failure could have led to
 498     // a whole heap collection.
 499     complete = complete || old_collected;
 500 
 501     if (complete) { // We did a "major" collection
 502       // FIXME: See comment at pre_full_gc_dump call
 503       post_full_gc_dump(NULL);   // do any post full gc dumps
 504     }
 505 
 506     if (PrintGCDetails) {
 507       print_heap_change(gch_prev_used);
 508 
 509       // Print metaspace info for full GC with PrintGCDetails flag.
 510       if (complete) {
 511         MetaspaceAux::print_metaspace_change(metadata_prev_used);
 512       }
 513     }
 514 
 515     // Adjust generation sizes.
 516     if (old_collected) {
 517       _old_gen->compute_new_size();
 518     }
 519     _young_gen->compute_new_size();
 520 
 521     if (complete) {
 522       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
 523       ClassLoaderDataGraph::purge();
 524       MetaspaceAux::verify_metrics();
 525       // Resize the metaspace capacity after full collections
 526       MetaspaceGC::compute_new_size();
 527       update_full_collections_completed();
 528     }
 529 
 530     // Track memory usage and detect low memory after GC finishes
 531     MemoryService::track_memory_usage();
 532 
 533     gc_epilogue(complete);
 534 
 535     if (must_restore_marks_for_biased_locking) {
 536       BiasedLocking::restore_marks();
 537     }
 538   }
 539 
 540   print_heap_after_gc();
 541 
 542 #ifdef TRACESPINNING
 543   ParallelTaskTerminator::print_termination_counts();
 544 #endif
 545 }
 546 
 547 HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
 548   return collector_policy()->satisfy_failed_allocation(size, is_tlab);
 549 }
 550 
 551 void GenCollectedHeap::set_par_threads(uint t) {
 552   SharedHeap::set_par_threads(t);
 553   _gen_process_roots_tasks->set_n_threads(t);
 554 }
 555 
 556 void GenCollectedHeap::
 557 gen_process_roots(Generation::Type type,
 558                   bool younger_gens_as_roots,
 559                   bool activate_scope,
 560                   SharedHeap::ScanningOption so,
 561                   OopsInGenClosure* not_older_gens,
 562                   OopsInGenClosure* weak_roots,
 563                   OopsInGenClosure* older_gens,
 564                   CLDClosure* cld_closure,
 565                   CLDClosure* weak_cld_closure,
 566                   CodeBlobClosure* code_closure) {
 567 
 568   // General roots.
 569   SharedHeap::process_roots(activate_scope, so,
 570                             not_older_gens, weak_roots,
 571                             cld_closure, weak_cld_closure,
 572                             code_closure);
 573 
 574   if (younger_gens_as_roots) {
 575     if (!_gen_process_roots_tasks->is_task_claimed(GCH_PS_younger_gens)) {
 576       if (type == Generation::Old) {
 577         not_older_gens->set_generation(_young_gen);
 578         _young_gen->oop_iterate(not_older_gens);
 579       }
 580       not_older_gens->reset_generation();
 581     }
 582   }
 583   // When collection is parallel, all threads get to cooperate to do
 584   // old generation scanning.
 585   if (type == Generation::Young) {
 586     older_gens->set_generation(_old_gen);
 587     rem_set()->younger_refs_iterate(_old_gen, older_gens);
 588     older_gens->reset_generation();
 589   }
 590 
 591   _gen_process_roots_tasks->all_tasks_completed();
 592 }
 593 
 594 void GenCollectedHeap::
 595 gen_process_roots(Generation::Type type,
 596                   bool younger_gens_as_roots,
 597                   bool activate_scope,
 598                   SharedHeap::ScanningOption so,
 599                   bool only_strong_roots,
 600                   OopsInGenClosure* not_older_gens,
 601                   OopsInGenClosure* older_gens,
 602                   CLDClosure* cld_closure) {
 603 
 604   const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots;
 605 
 606   bool is_moving_collection = false;
 607   if (type == Generation::Young || is_adjust_phase) {
 608     // young collections are always moving
 609     is_moving_collection = true;
 610   }
 611 
 612   MarkingCodeBlobClosure mark_code_closure(not_older_gens, is_moving_collection);
 613   CodeBlobClosure* code_closure = &mark_code_closure;
 614 
 615   gen_process_roots(type,
 616                     younger_gens_as_roots,
 617                     activate_scope, so,
 618                     not_older_gens, only_strong_roots ? NULL : not_older_gens,
 619                     older_gens,
 620                     cld_closure, only_strong_roots ? NULL : cld_closure,
 621                     code_closure);
 622 
 623 }
 624 
 625 void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
 626   SharedHeap::process_weak_roots(root_closure);
 627   // "Local" "weak" refs
 628   _young_gen->ref_processor()->weak_oops_do(root_closure);
 629   _old_gen->ref_processor()->weak_oops_do(root_closure);
 630 }
 631 
 632 #define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
 633 void GenCollectedHeap::                                                 \
 634 oop_since_save_marks_iterate(Generation::Type gen,                      \
 635                              OopClosureType* cur,                       \
 636                              OopClosureType* older) {                   \
 637   if (gen == Generation::Young) {                                       \
 638     _young_gen->oop_since_save_marks_iterate##nv_suffix(cur);           \
 639     _old_gen->oop_since_save_marks_iterate##nv_suffix(older);           \
 640   } else {                                                              \
 641     _old_gen->oop_since_save_marks_iterate##nv_suffix(cur);             \
 642   }                                                                     \
 643 }
 644 
 645 ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)
 646 
 647 #undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN
 648 
 649 bool GenCollectedHeap::no_allocs_since_save_marks(bool include_young) {
 650   return (!include_young || _young_gen->no_allocs_since_save_marks()) &&
 651          _old_gen->no_allocs_since_save_marks();



 652 }
 653 
 654 bool GenCollectedHeap::supports_inline_contig_alloc() const {
 655   return _young_gen->supports_inline_contig_alloc();
 656 }
 657 
 658 HeapWord** GenCollectedHeap::top_addr() const {
 659   return _young_gen->top_addr();
 660 }
 661 
 662 HeapWord** GenCollectedHeap::end_addr() const {
 663   return _young_gen->end_addr();
 664 }
 665 
 666 // public collection interfaces
 667 
 668 void GenCollectedHeap::collect(GCCause::Cause cause) {
 669   if (should_do_concurrent_full_gc(cause)) {
 670 #if INCLUDE_ALL_GCS
 671     // mostly concurrent full collection
 672     collect_mostly_concurrent(cause);
 673 #else  // INCLUDE_ALL_GCS
 674     ShouldNotReachHere();
 675 #endif // INCLUDE_ALL_GCS
 676   } else if (cause == GCCause::_wb_young_gc) {
 677     // minor collection for WhiteBox API
 678     collect(cause, Generation::Young);
 679   } else {
 680 #ifdef ASSERT
 681     if (cause == GCCause::_scavenge_alot) {
 682       // minor collection only
 683       collect(cause, Generation::Young);
 684     } else {
 685       // Stop-the-world full collection
 686       collect(cause, Generation::Old);
 687     }
 688 #else
 689     // Stop-the-world full collection
 690     collect(cause, Generation::Old);
 691 #endif
 692   }
 693 }
 694 
 695 void GenCollectedHeap::collect(GCCause::Cause cause, Generation::Type max_gen) {
 696   // The caller doesn't have the Heap_lock
 697   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
 698   MutexLocker ml(Heap_lock);
 699   collect_locked(cause, max_gen);
 700 }
 701 
 702 void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
 703   // The caller has the Heap_lock
 704   assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
 705   collect_locked(cause, Generation::Old);
 706 }
 707 
 708 // this is the private collection interface
 709 // The Heap_lock is expected to be held on entry.
 710 
 711 void GenCollectedHeap::collect_locked(GCCause::Cause cause, Generation::Type max_generation) {
 712   // Read the GC count while holding the Heap_lock
 713   unsigned int gc_count_before      = total_collections();
 714   unsigned int full_gc_count_before = total_full_collections();
 715   {
 716     MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
 717     VM_GenCollectFull op(gc_count_before, full_gc_count_before,
 718                          cause, max_generation);
 719     VMThread::execute(&op);
 720   }
 721 }
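
Note the idiom: the collection counters are sampled while Heap_lock is held,
the lock is dropped, and the counts travel with the VM operation so it can
detect that another thread's GC already ran in the window and bail out. A
sketch with standard primitives (simplified; not the real VM_GenCollectFull
or VMThread interface):

#include <mutex>

static std::mutex   heap_lock_sketch;
static unsigned int total_collections_count = 0;

struct CollectOpSketch {
  unsigned int gc_count_before;
  void execute() {
    std::lock_guard<std::mutex> lock(heap_lock_sketch);
    // Another GC ran between the snapshot and now: the request has
    // effectively been satisfied, so skip the collection.
    if (total_collections_count != gc_count_before) return;
    ++total_collections_count;  // ... perform the collection here ...
  }
};

void collect_sketch() {
  unsigned int count_before;
  {
    std::lock_guard<std::mutex> lock(heap_lock_sketch);  // Heap_lock held
    count_before = total_collections_count;              // snapshot under the lock
  }                                                      // lock released, as MutexUnlocker does
  CollectOpSketch op{count_before};
  op.execute();  // stands in for VMThread::execute(&op)
}
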
 722 
 723 #if INCLUDE_ALL_GCS
 724 bool GenCollectedHeap::create_cms_collector() {
 725 
 726   assert(_old_gen->kind() == Generation::ConcurrentMarkSweep,
 727          "Unexpected generation kinds");
 728   // Skip two header words in the block content verification
 729   NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
 730   CMSCollector* collector = new CMSCollector(
 731     (ConcurrentMarkSweepGeneration*)_old_gen,
 732     _rem_set->as_CardTableRS(),
 733     (ConcurrentMarkSweepPolicy*) collector_policy());
 734 
 735   if (collector == NULL || !collector->completed_initialization()) {
 736     if (collector) {
 737       delete collector;  // Be nice in embedded situation
 738     }


 741   }
 742   return true;  // success
 743 }
 744 
 745 void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
 746   assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");
 747 
 748   MutexLocker ml(Heap_lock);
 749   // Read the GC counts while holding the Heap_lock
 750   unsigned int full_gc_count_before = total_full_collections();
 751   unsigned int gc_count_before      = total_collections();
 752   {
 753     MutexUnlocker mu(Heap_lock);
 754     VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
 755     VMThread::execute(&op);
 756   }
 757 }
 758 #endif // INCLUDE_ALL_GCS
 759 
 760 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
 761    do_full_collection(clear_all_soft_refs, Generation::Old);
 762 }
 763 
 764 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
 765                                           Generation::Type max_gen) {
 766   Generation::Type local_max_gen;
 767   if (!incremental_collection_will_fail(false /* don't consult_young */) &&
 768       gc_cause() == GCCause::_gc_locker) {
 769     local_max_gen = Generation::Young;
 770   } else {
 771     local_max_gen = max_gen;
 772   }
 773 
 774   do_collection(true                 /* full */,
 775                 clear_all_soft_refs  /* clear_all_soft_refs */,
 776                 0                    /* size */,
 777                 false                /* is_tlab */,
 778                 local_max_gen        /* max_gen */);
 779   // Hack XXX FIX ME !!!
 780   // A scavenge may not have been attempted, or may have
 781   // been attempted and failed, because the old gen was too full
 782   if (local_max_gen == Generation::Young && gc_cause() == GCCause::_gc_locker &&
 783       incremental_collection_will_fail(false /* don't consult_young */)) {
 784     if (PrintGCDetails) {
 785       gclog_or_tty->print_cr("GC locker: Trying a full collection "
 786                              "because scavenge failed");
 787     }
 788     // This time allow the old gen to be collected as well
 789     do_collection(true                 /* full */,
 790                   clear_all_soft_refs  /* clear_all_soft_refs */,
 791                   0                    /* size */,
 792                   false                /* is_tlab */,
 793                   Generation::Old      /* max_gen */);
 794   }
 795 }
 796 
 797 bool GenCollectedHeap::is_in_young(oop p) {
 798   bool result = ((HeapWord*)p) < _old_gen->reserved().start();
 799   assert(result == _young_gen->is_in_reserved(p),
 800          err_msg("incorrect test - result=%d, p=" INTPTR_FORMAT, result, p2i((void*)p)));
 801   return result;
 802 }
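
The single comparison works because initialize() placed the young generation
in the low part of one contiguous reservation, directly below the old
generation. A toy model with hypothetical layout values:

#include <cassert>

static char heap_memory[4096];                     // pretend reservation
static char* const old_base = heap_memory + 1024;  // young is [heap_memory, old_base)

// Mirrors: ((HeapWord*)p) < _old_gen->reserved().start()
bool is_in_young_sketch(const void* p) {
  return p < static_cast<const void*>(old_base);
}

int main() {
  assert(is_in_young_sketch(heap_memory + 100));   // below old_base: young
  assert(!is_in_young_sketch(old_base + 100));     // at or above: old
  return 0;
}
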
 803 
 804 // Returns "TRUE" iff "p" points into the committed areas of the heap.
 805 bool GenCollectedHeap::is_in(const void* p) const {
 806   #ifndef ASSERT
 807   guarantee(VerifyBeforeGC      ||
 808             VerifyDuringGC      ||
 809             VerifyBeforeExit    ||
 810             VerifyDuringStartup ||
 811             PrintAssembly       ||
 812             tty->count() != 0   ||   // already printing
 813             VerifyAfterGC       ||


1181   }
1182 }
1183 #endif  // not PRODUCT
1184 
1185 class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
1186  public:
1187   void do_generation(Generation* gen) {
1188     gen->ensure_parsability();
1189   }
1190 };
1191 
1192 void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
1193   CollectedHeap::ensure_parsability(retire_tlabs);
1194   GenEnsureParsabilityClosure ep_cl;
1195   generation_iterate(&ep_cl, false);
1196 }
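
GenEnsureParsabilityClosure follows the GenClosure visitor pattern: subclass,
override do_generation(), and pass the closure to generation_iterate(), which
applies it to each generation. Another sketch in the same style (the second
argument to generation_iterate is assumed here to select old-to-young order):

// Hypothetical closure summing the capacity of every generation.
class GenCapacityClosure: public GenCollectedHeap::GenClosure {
  size_t _total;
 public:
  GenCapacityClosure() : _total(0) { }
  void do_generation(Generation* gen) { _total += gen->capacity(); }
  size_t total() const { return _total; }
};

// Usage sketch:
//   GenCapacityClosure cc;
//   GenCollectedHeap::heap()->generation_iterate(&cc, false);
//   size_t total_capacity = cc.total();
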
1197 
1198 oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen,
1199                                               oop obj,
1200                                               size_t obj_size) {
1201   guarantee(old_gen == _old_gen, "We only get here with an old generation");
1202   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1203   HeapWord* result = NULL;
1204 
1205   result = old_gen->expand_and_allocate(obj_size, false);
1206 
1207   if (result != NULL) {
1208     Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
1209   }
1210   return oop(result);
1211 }
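
The fallback is simple: try to grow the old generation enough for obj_size
words, and only on success copy the object's words into the new location; a
NULL result means the promotion still failed. A simplified standalone model,
with malloc standing in for expand_and_allocate() and memcpy for
Copy::aligned_disjoint_words():

#include <cstdlib>
#include <cstring>

typedef unsigned long HeapWordValue;  // stand-in for HeapWord

// Hypothetical stand-in for Generation::expand_and_allocate(): returns
// space for 'word_size' words, or NULL if the expansion fails.
HeapWordValue* expand_and_allocate_sketch(size_t word_size) {
  return static_cast<HeapWordValue*>(std::malloc(word_size * sizeof(HeapWordValue)));
}

// Mirrors handle_failed_promotion(): copy only when space was obtained.
HeapWordValue* promote_sketch(const HeapWordValue* obj, size_t word_size) {
  HeapWordValue* result = expand_and_allocate_sketch(word_size);
  if (result != NULL) {
    std::memcpy(result, obj, word_size * sizeof(HeapWordValue));
  }
  return result;
}
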
1212 
1213 class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
1214   jlong _time;   // in ms
1215   jlong _now;    // in ms
1216 
1217  public:
1218   GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { }
1219 
1220   jlong time() { return _time; }
1221 

