22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/serial/defNewGeneration.inline.hpp"
27 #include "gc/shared/cardTableRS.hpp"
28 #include "gc/shared/collectorCounters.hpp"
29 #include "gc/shared/gcHeapSummary.hpp"
30 #include "gc/shared/gcLocker.inline.hpp"
31 #include "gc/shared/gcPolicyCounters.hpp"
32 #include "gc/shared/gcTimer.hpp"
33 #include "gc/shared/gcTrace.hpp"
34 #include "gc/shared/gcTraceTime.hpp"
35 #include "gc/shared/genCollectedHeap.hpp"
36 #include "gc/shared/genOopClosures.inline.hpp"
37 #include "gc/shared/generationSpec.hpp"
38 #include "gc/shared/referencePolicy.hpp"
39 #include "gc/shared/space.inline.hpp"
40 #include "gc/shared/spaceDecorator.hpp"
41 #include "gc/shared/strongRootsScope.hpp"
42 #include "memory/iterator.hpp"
43 #include "oops/instanceRefKlass.hpp"
44 #include "oops/oop.inline.hpp"
45 #include "runtime/atomic.inline.hpp"
46 #include "runtime/java.hpp"
47 #include "runtime/prefetch.inline.hpp"
48 #include "runtime/thread.inline.hpp"
49 #include "utilities/copy.hpp"
50 #include "utilities/globalDefinitions.hpp"
51 #include "utilities/stack.inline.hpp"
52 #if INCLUDE_ALL_GCS
53 #include "gc/cms/parOopClosures.hpp"
54 #endif
55
56 //
57 // DefNewGeneration functions.
58
59 // Methods of protected closure types.
60
61 DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* young_gen) : _young_gen(young_gen) {
118 ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
119 OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
120 {
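    // Cache the end of the young generation's reserved region. The young
    // generation lies at the bottom of the heap, so only references below
    // this boundary can point into young space and need scavenging.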
121 _boundary = _g->reserved().end();
122 }
123
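    // The virtual do_oop entry points simply delegate to the inlined
    // do_oop_work (see genOopClosures.inline.hpp) so the hot path can be
    // inlined; FastScanClosure below follows the same pattern.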
124 void ScanClosure::do_oop(oop* p) { ScanClosure::do_oop_work(p); }
125 void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }
126
127 FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
128 OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
129 {
130 _boundary = _g->reserved().end();
131 }
132
133 void FastScanClosure::do_oop(oop* p) { FastScanClosure::do_oop_work(p); }
134 void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }
135
136 void KlassScanClosure::do_klass(Klass* klass) {
137 #ifndef PRODUCT
138 if (TraceScavenge) {
139 ResourceMark rm;
140 gclog_or_tty->print_cr("KlassScanClosure::do_klass " PTR_FORMAT ", %s, dirty: %s",
141 p2i(klass),
142 klass->external_name(),
143 klass->has_modified_oops() ? "true" : "false");
144 }
145 #endif
146
147 // If the klass has not been dirtied we know that there are
148 // no references into the young gen and we can skip it.
149 if (klass->has_modified_oops()) {
150 if (_accumulate_modified_oops) {
151 klass->accumulate_modified_oops();
152 }
153
154 // Clear this state since we're going to scavenge all the metadata.
155 klass->clear_modified_oops();
156
157 // Tell the closure which Klass is being scanned so that it can be dirtied
158 // if oops are left pointing into the young gen.
159 _scavenge_closure->set_scanned_klass(klass);
160
161 klass->oops_do(_scavenge_closure);
162
163 _scavenge_closure->set_scanned_klass(NULL);
164 }
344 HeapWord* prev_high = (HeapWord*) _virtual_space.high();
345 bool success = _virtual_space.expand_by(bytes);
346 if (success && ZapUnusedHeapArea) {
347 // Mangle newly committed space immediately because it
348 // can be done here more simply than after the new
349 // spaces have been computed.
350 HeapWord* new_high = (HeapWord*) _virtual_space.high();
351 MemRegion mangle_region(prev_high, new_high);
352 SpaceMangler::mangle_region(mangle_region);
353 }
354
355 // Do not attempt an expand-to-reserve size. The
356 // request should properly observe the maximum size of
357 // the generation, so an expand-to-reserve should be
358 // unnecessary. Also, a second expand-to-reserve call
359 // could potentially cause an undue expansion, for
360 // example if the first expand fails for unknown reasons
361 // but the second succeeds and expands the heap to its
362 // maximum value.
363 if (GC_locker::is_active()) {
364 if (PrintGC && Verbose) {
365 gclog_or_tty->print_cr("Garbage collection disabled, "
366 "expanded heap instead");
367 }
368 }
369
370 return success;
371 }
372
373 void DefNewGeneration::compute_new_size() {
374 // This is called after a GC that includes the old generation, so from-space
375 // will normally be empty.
376 // Note that we check both spaces, since if scavenge failed they swap roles.
377 // If either is non-empty we bail out (otherwise we would have to relocate the objects).
378 if (!from()->is_empty() || !to()->is_empty()) {
379 return;
380 }
381
382 GenCollectedHeap* gch = GenCollectedHeap::heap();
383
384 size_t old_size = gch->old_gen()->capacity();
385 size_t new_size_before = _virtual_space.committed_size();
386 size_t min_new_size = initial_size();
387 size_t max_new_size = reserved().byte_size();
414 // (and at this point it was expected to succeed),
415 // ignore the failure (leaving "changed" as false).
416 }
417 if (desired_new_size < new_size_before && eden()->is_empty()) {
418 // Only shrink when eden is empty; live objects in eden would have to be relocated.
419 size_t change = new_size_before - desired_new_size;
420 assert(change % alignment == 0, "just checking");
421 _virtual_space.shrink_by(change);
422 changed = true;
423 }
424 if (changed) {
425 // The spaces have already been mangled at this point but
426 // may not have been cleared (set top = bottom) and should be.
427 // Mangling was done when the heap was being expanded.
428 compute_space_boundaries(eden()->used(),
429 SpaceDecorator::Clear,
430 SpaceDecorator::DontMangle);
431 MemRegion cmr((HeapWord*)_virtual_space.low(),
432 (HeapWord*)_virtual_space.high());
433 gch->barrier_set()->resize_covered_region(cmr);
434 if (Verbose && PrintGC) {
435 size_t new_size_after = _virtual_space.committed_size();
436 size_t eden_size_after = eden()->capacity();
437 size_t survivor_size_after = from()->capacity();
438 gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
439 SIZE_FORMAT "K [eden="
440 SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
441 new_size_before/K, new_size_after/K,
442 eden_size_after/K, survivor_size_after/K);
443 if (WizardMode) {
444 gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
445 thread_increase_size/K, threads_count);
446 }
447 gclog_or_tty->cr();
448 }
449 }
450 }
451
452 void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl, uint n_threads) {
453 assert(false, "NYI -- are you sure you want to call this?");
454 }
455
456
457 size_t DefNewGeneration::capacity() const {
458 return eden()->capacity()
459 + from()->capacity(); // to() is only used during scavenge
460 }
461
462
463 size_t DefNewGeneration::used() const {
464 return eden()->used()
465 + from()->used(); // to() is only used during scavenge
466 }
467
468
469 size_t DefNewGeneration::free() const {
492
493 HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
494 HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }
495
496 void DefNewGeneration::object_iterate(ObjectClosure* blk) {
497 eden()->object_iterate(blk);
498 from()->object_iterate(blk);
499 }
500
501
502 void DefNewGeneration::space_iterate(SpaceClosure* blk,
503 bool usedOnly) {
504 blk->do_space(eden());
505 blk->do_space(from());
506 blk->do_space(to());
507 }
508
509 // The last collection bailed out; we are running out of heap space,
510 // so we try to allocate from the from-space, too.
511 HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
512 HeapWord* result = NULL;
513 if (Verbose && PrintGCDetails) {
514 gclog_or_tty->print("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "):"
515 " will_fail: %s"
516 " heap_lock: %s"
517 " free: " SIZE_FORMAT,
518 size,
519 GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
520 "true" : "false",
521 Heap_lock->is_locked() ? "locked" : "unlocked",
522 from()->free());
523 }
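    // Only fall back to from-space when a previous collection bailed out
    // (should_allocate_from_space) or the GC locker is holding off a needed
    // collection, and only when it is safe to do so: either we hold the
    // Heap_lock or we are the VM thread at a safepoint.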
524 if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) {
525 if (Heap_lock->owned_by_self() ||
526 (SafepointSynchronize::is_at_safepoint() &&
527 Thread::current()->is_VM_thread())) {
528 // If the Heap_lock is not locked by this thread, this will be called
529 // again later with the Heap_lock held.
530 result = from()->allocate(size);
531 } else if (PrintGC && Verbose) {
532 gclog_or_tty->print_cr(" Heap_lock is not owned by self");
533 }
534 } else if (PrintGC && Verbose) {
535 gclog_or_tty->print_cr(" should_allocate_from_space: NOT");
536 }
537 if (PrintGC && Verbose) {
538 gclog_or_tty->print_cr(" returns %s", result == NULL ? "NULL" : "object");
539 }
540 return result;
541 }
542
543 HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
544 bool is_tlab,
545 bool parallel) {
546 // We don't attempt to expand the young generation (but perhaps we should).
547 return allocate(size, is_tlab);
548 }
549
550 void DefNewGeneration::adjust_desired_tenuring_threshold() {
551 // Set the desired survivor size to half the real survivor space (the "half" reflects the default TargetSurvivorRatio of 50).
552 GCPolicyCounters* gc_counters = GenCollectedHeap::heap()->collector_policy()->counters();
553 _tenuring_threshold =
554 age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize, gc_counters);
555 }
556
557 void DefNewGeneration::collect(bool full,
558 bool clear_all_soft_refs,
559 size_t size,
560 bool is_tlab) {
561 assert(full || size > 0, "otherwise we don't want to collect");
562
563 GenCollectedHeap* gch = GenCollectedHeap::heap();
564
565 _gc_timer->register_gc_start();
566 DefNewTracer gc_tracer;
567 gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
568
569 _old_gen = gch->old_gen();
570
571 // If the next generation is too full to accommodate promotion
572 // from this generation, pass on collection; let the next generation
573 // do it.
574 if (!collection_attempt_is_safe()) {
575 if (Verbose && PrintGCDetails) {
576 gclog_or_tty->print(" :: Collection attempt not safe :: ");
577 }
578 gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
579 return;
580 }
581 assert(to()->is_empty(), "Else not collection_attempt_is_safe");
582
583 init_assuming_no_promotion_failure();
584
585 GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
586 // Capture heap used before collection (for printing).
587 size_t gch_prev_used = gch->used();
588
589 gch->trace_heap_before_gc(&gc_tracer);
590
591 // These can be shared for all code paths
592 IsAliveClosure is_alive(this);
593 ScanWeakRefClosure scan_weak_ref(this);
594
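    // Reset the per-age statistics and empty to-space (mangling it when
    // mangling is enabled) before survivors are evacuated into it.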
595 age_table()->clear();
596 to()->clear(SpaceDecorator::Mangle);
597
598 gch->rem_set()->prepare_for_younger_refs_iterate(false);
599
600 assert(gch->no_allocs_since_save_marks(),
601 "save marks have not been newly set.");
602
603 // Not very pretty.
604 CollectorPolicy* cp = gch->collector_policy();
605
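    // Two variants of the scan closure: the gc_barrier flavor additionally
    // updates the card table for old-generation locations that still refer
    // into the young generation after the copy.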
606 FastScanClosure fsc_with_no_gc_barrier(this, false);
607 FastScanClosure fsc_with_gc_barrier(this, true);
662 // here so that unzapped regions don't get distributed to
663 // other spaces.
664 to()->mangle_unused_area();
665 }
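    // Swap the roles of the survivor spaces: the survivors just evacuated
    // into to-space now form the new from-space.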
666 swap_spaces();
667
668 assert(to()->is_empty(), "to space should be empty now");
669
670 adjust_desired_tenuring_threshold();
671
672 // A successful scavenge should restart the GC time limit count, which is
673 // used for full GCs.
674 AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
675 size_policy->reset_gc_overhead_limit_count();
676 assert(!gch->incremental_collection_failed(), "Should be clear");
677 } else {
678 assert(_promo_failure_scan_stack.is_empty(), "post condition");
679 _promo_failure_scan_stack.clear(true); // Clear cached segments.
680
681 remove_forwarding_pointers();
682 if (PrintGCDetails) {
683 gclog_or_tty->print(" (promotion failed) ");
684 }
685 // Add to-space to the list of spaces to compact
686 // when a promotion failure has occurred. In that
687 // case there can be live objects in to-space
688 // as a result of a partial evacuation of eden
689 // and from-space.
690 swap_spaces(); // For uniformity wrt ParNewGeneration.
691 from()->set_next_compaction_space(to());
692 gch->set_incremental_collection_failed();
693
694 // Inform the next generation that a promotion failure occurred.
695 _old_gen->promotion_failure_occurred();
696 gc_tracer.report_promotion_failed(_promotion_failed_info);
697
698 // Reset the PromotionFailureALot counters.
699 NOT_PRODUCT(gch->reset_promotion_should_fail();)
700 }
701 if (PrintGC && !PrintGCDetails) {
702 gch->print_heap_change(gch_prev_used);
703 }
704 // set new iteration safe limit for the survivor spaces
705 from()->set_concurrent_iteration_safe_limit(from()->top());
706 to()->set_concurrent_iteration_safe_limit(to()->top());
707
708 // We need to use a monotonically non-decreasing time in ms,
709 // or we will see time-warp warnings; os::javaTimeMillis()
710 // does not guarantee monotonicity, so derive the time from javaTimeNanos().
711 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
712 update_time_of_last_gc(now);
713
714 gch->trace_heap_after_gc(&gc_tracer);
715
716 _gc_timer->register_gc_end();
717
718 gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
719 }
720
721 class RemoveForwardPointerClosure: public ObjectClosure {
722 public:
723 void do_object(oop obj) {
745 obj->set_mark(m);
746 }
747 _objs_with_preserved_marks.clear(true);
748 _preserved_marks_of_objs.clear(true);
749 }
750
751 void DefNewGeneration::preserve_mark(oop obj, markOop m) {
752 assert(_promotion_failed && m->must_be_preserved_for_promotion_failure(obj),
753 "Oversaving!");
754 _objs_with_preserved_marks.push(obj);
755 _preserved_marks_of_objs.push(m);
756 }
757
758 void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
759 if (m->must_be_preserved_for_promotion_failure(obj)) {
760 preserve_mark(obj, m);
761 }
762 }
763
764 void DefNewGeneration::handle_promotion_failure(oop old) {
765 if (PrintPromotionFailure && !_promotion_failed) {
766 gclog_or_tty->print(" (promotion failure size = %d) ",
767 old->size());
768 }
769 _promotion_failed = true;
770 _promotion_failed_info.register_copy_failure(old->size());
771 preserve_mark_if_necessary(old, old->mark());
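    // Self-forwarding flags the promotion failure while keeping the object
    // graph consistent: following the forwarding pointer yields the object's
    // original location.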
772 // forward to self
773 old->forward_to(old);
774
775 _promo_failure_scan_stack.push(old);
776
777 if (!_promo_failure_drain_in_progress) {
778 // prevent recursion in copy_to_survivor_space()
779 _promo_failure_drain_in_progress = true;
780 drain_promo_failure_scan_stack();
781 _promo_failure_drain_in_progress = false;
782 }
783 }
784
785 oop DefNewGeneration::copy_to_survivor_space(oop old) {
786 assert(is_in_reserved(old) && !old->is_forwarded(),
787 "shouldn't be scavenging this oop");
788 size_t s = old->size();
880 size_t free_words = pointer_delta(to_space->end(), to_space->top());
881 if (free_words >= MinFreeScratchWords) {
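    // Carve a ScratchBlock out of the unused tail of to-space and link it
    // onto the caller's list.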
882 ScratchBlock* sb = (ScratchBlock*)to_space->top();
883 sb->num_words = free_words;
884 sb->next = list;
885 list = sb;
886 }
887 }
888
889 void DefNewGeneration::reset_scratch() {
890 // If to_space was contributed as scratch space, mangle all of
891 // it when ZapUnusedHeapArea is set. This is needed because
892 // top is not maintained while to-space is used as scratch.
893 if (ZapUnusedHeapArea) {
894 to()->mangle_unused_area_complete();
895 }
896 }
897
898 bool DefNewGeneration::collection_attempt_is_safe() {
899 if (!to()->is_empty()) {
900 if (Verbose && PrintGCDetails) {
901 gclog_or_tty->print(" :: to is not empty :: ");
902 }
903 return false;
904 }
905 if (_old_gen == NULL) {
906 GenCollectedHeap* gch = GenCollectedHeap::heap();
907 _old_gen = gch->old_gen();
908 }
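    // The attempt is safe only if the old generation could absorb a
    // worst-case promotion of everything currently used in the young gen.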
909 return _old_gen->promotion_attempt_is_safe(used());
910 }
911
912 void DefNewGeneration::gc_epilogue(bool full) {
913 DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)
914
915 assert(!GC_locker::is_active(), "We should not be executing here");
916 // Check if the heap is approaching full after a collection has
917 // been done. Generally the young generation is, at a minimum,
918 // empty at the end of a collection. If it is not, then
919 // the heap is approaching full.
920 GenCollectedHeap* gch = GenCollectedHeap::heap();
921 if (full) {
922 DEBUG_ONLY(seen_incremental_collection_failed = false;)
923 if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
924 if (Verbose && PrintGCDetails) {
925 gclog_or_tty->print("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
926 GCCause::to_string(gch->gc_cause()));
927 }
928 gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
929 set_should_allocate_from_space(); // we seem to be running out of space
930 } else {
931 if (Verbose && PrintGCDetails) {
932 gclog_or_tty->print("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
933 GCCause::to_string(gch->gc_cause()));
934 }
935 gch->clear_incremental_collection_failed(); // We just did a full collection
936 clear_should_allocate_from_space(); // if set
937 }
938 } else {
939 #ifdef ASSERT
940 // It is possible that incremental_collection_failed() == true
941 // here, because an attempted scavenge did not succeed. The policy
942 // is normally expected to cause a full collection which should
943 // clear that condition, so we should not be here twice in a row
944 // with incremental_collection_failed() == true without having done
945 // a full collection in between.
946 if (!seen_incremental_collection_failed &&
947 gch->incremental_collection_failed()) {
948 if (Verbose && PrintGCDetails) {
949 gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
950 GCCause::to_string(gch->gc_cause()));
951 }
952 seen_incremental_collection_failed = true;
953 } else if (seen_incremental_collection_failed) {
954 if (Verbose && PrintGCDetails) {
955 gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
956 GCCause::to_string(gch->gc_cause()));
957 }
958 assert(gch->gc_cause() == GCCause::_scavenge_alot ||
959 (GCCause::is_user_requested_gc(gch->gc_cause()) && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
960 !gch->incremental_collection_failed(),
961 "Twice in a row");
962 seen_incremental_collection_failed = false;
963 }
964 #endif // ASSERT
965 }
966
967 if (ZapUnusedHeapArea) {
968 eden()->check_mangled_unused_area_complete();
969 from()->check_mangled_unused_area_complete();
970 to()->check_mangled_unused_area_complete();
971 }
972
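    // With asynchronous chunk-pool cleaning disabled, prune the arena chunk
    // pool here at the end of the collection instead.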
973 if (!CleanChunkPoolAsync) {
974 Chunk::clean_chunk_pool();
975 }
976
977 // update the generation and space performance counters
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/serial/defNewGeneration.inline.hpp"
27 #include "gc/shared/cardTableRS.hpp"
28 #include "gc/shared/collectorCounters.hpp"
29 #include "gc/shared/gcHeapSummary.hpp"
30 #include "gc/shared/gcLocker.inline.hpp"
31 #include "gc/shared/gcPolicyCounters.hpp"
32 #include "gc/shared/gcTimer.hpp"
33 #include "gc/shared/gcTrace.hpp"
34 #include "gc/shared/gcTraceTime.hpp"
35 #include "gc/shared/genCollectedHeap.hpp"
36 #include "gc/shared/genOopClosures.inline.hpp"
37 #include "gc/shared/generationSpec.hpp"
38 #include "gc/shared/referencePolicy.hpp"
39 #include "gc/shared/space.inline.hpp"
40 #include "gc/shared/spaceDecorator.hpp"
41 #include "gc/shared/strongRootsScope.hpp"
42 #include "logging/log.hpp"
43 #include "memory/iterator.hpp"
44 #include "oops/instanceRefKlass.hpp"
45 #include "oops/oop.inline.hpp"
46 #include "runtime/atomic.inline.hpp"
47 #include "runtime/java.hpp"
48 #include "runtime/prefetch.inline.hpp"
49 #include "runtime/thread.inline.hpp"
50 #include "utilities/copy.hpp"
51 #include "utilities/globalDefinitions.hpp"
52 #include "utilities/stack.inline.hpp"
53 #if INCLUDE_ALL_GCS
54 #include "gc/cms/parOopClosures.hpp"
55 #endif
56
57 //
58 // DefNewGeneration functions.
59
60 // Methods of protected closure types.
61
62 DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* young_gen) : _young_gen(young_gen) {
119 ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
120 OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
121 {
122 _boundary = _g->reserved().end();
123 }
124
125 void ScanClosure::do_oop(oop* p) { ScanClosure::do_oop_work(p); }
126 void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }
127
128 FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
129 OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
130 {
131 _boundary = _g->reserved().end();
132 }
133
134 void FastScanClosure::do_oop(oop* p) { FastScanClosure::do_oop_work(p); }
135 void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }
136
137 void KlassScanClosure::do_klass(Klass* klass) {
138 #ifndef PRODUCT
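    // Develop-time tracing only: this block is compiled only into non-product
    // builds, and the ResourceMark covers the resource-area allocation done
    // by external_name() below.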
139 ResourceMark rm;
140 log_develop(gc, scavenge)("KlassScanClosure::do_klass " PTR_FORMAT ", %s, dirty: %s",
141 p2i(klass),
142 klass->external_name(),
143 klass->has_modified_oops() ? "true" : "false");
144 #endif
145
146 // If the klass has not been dirtied we know that there are
147 // no references into the young gen and we can skip it.
148 if (klass->has_modified_oops()) {
149 if (_accumulate_modified_oops) {
150 klass->accumulate_modified_oops();
151 }
152
153 // Clear this state since we're going to scavenge all the metadata.
154 klass->clear_modified_oops();
155
156 // Tell the closure which Klass is being scanned so that it can be dirtied
157 // if oops are left pointing into the young gen.
158 _scavenge_closure->set_scanned_klass(klass);
159
160 klass->oops_do(_scavenge_closure);
161
162 _scavenge_closure->set_scanned_klass(NULL);
163 }
343 HeapWord* prev_high = (HeapWord*) _virtual_space.high();
344 bool success = _virtual_space.expand_by(bytes);
345 if (success && ZapUnusedHeapArea) {
346 // Mangle newly committed space immediately because it
347 // can be done here more simply than after the new
348 // spaces have been computed.
349 HeapWord* new_high = (HeapWord*) _virtual_space.high();
350 MemRegion mangle_region(prev_high, new_high);
351 SpaceMangler::mangle_region(mangle_region);
352 }
353
354 // Do not attempt an expand-to-reserve size. The
355 // request should properly observe the maximum size of
356 // the generation, so an expand-to-reserve should be
357 // unnecessary. Also, a second expand-to-reserve call
358 // could potentially cause an undue expansion, for
359 // example if the first expand fails for unknown reasons
360 // but the second succeeds and expands the heap to its
361 // maximum value.
362 if (GC_locker::is_active()) {
363 log_debug(gc)("Garbage collection disabled, expanded heap instead");
364 }
365
366 return success;
367 }
368
369 void DefNewGeneration::compute_new_size() {
370 // This is called after a GC that includes the old generation, so from-space
371 // will normally be empty.
372 // Note that we check both spaces, since if scavenge failed they swap roles.
373 // If either is non-empty we bail out (otherwise we would have to relocate the objects).
374 if (!from()->is_empty() || !to()->is_empty()) {
375 return;
376 }
377
378 GenCollectedHeap* gch = GenCollectedHeap::heap();
379
380 size_t old_size = gch->old_gen()->capacity();
381 size_t new_size_before = _virtual_space.committed_size();
382 size_t min_new_size = initial_size();
383 size_t max_new_size = reserved().byte_size();
410 // (and at this point it was expected to succeed),
411 // ignore the failure (leaving "changed" as false).
412 }
413 if (desired_new_size < new_size_before && eden()->is_empty()) {
414 // Only shrink when eden is empty; live objects in eden would have to be relocated.
415 size_t change = new_size_before - desired_new_size;
416 assert(change % alignment == 0, "just checking");
417 _virtual_space.shrink_by(change);
418 changed = true;
419 }
420 if (changed) {
421 // The spaces have already been mangled at this point but
422 // may not have been cleared (set top = bottom) and should be.
423 // Mangling was done when the heap was being expanded.
424 compute_space_boundaries(eden()->used(),
425 SpaceDecorator::Clear,
426 SpaceDecorator::DontMangle);
427 MemRegion cmr((HeapWord*)_virtual_space.low(),
428 (HeapWord*)_virtual_space.high());
429 gch->barrier_set()->resize_covered_region(cmr);
430
431 log_debug(gc, heap, ergo)(
432 "New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden=" SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
433 new_size_before/K, _virtual_space.committed_size()/K,
434 eden()->capacity()/K, from()->capacity()/K);
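    // The per-thread sizing detail is emitted separately at trace level,
    // e.g. enabled with -Xlog:gc+heap+ergo=trace.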
435 log_trace(gc, heap, ergo)(
436 " [allowed " SIZE_FORMAT "K extra for %d threads]",
437 thread_increase_size/K, threads_count);
438 }
439 }
440
441 void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl, uint n_threads) {
442 assert(false, "NYI -- are you sure you want to call this?");
443 }
444
445
446 size_t DefNewGeneration::capacity() const {
447 return eden()->capacity()
448 + from()->capacity(); // to() is only used during scavenge
449 }
450
451
452 size_t DefNewGeneration::used() const {
453 return eden()->used()
454 + from()->used(); // to() is only used during scavenge
455 }
456
457
458 size_t DefNewGeneration::free() const {
481
482 HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
483 HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }
484
485 void DefNewGeneration::object_iterate(ObjectClosure* blk) {
486 eden()->object_iterate(blk);
487 from()->object_iterate(blk);
488 }
489
490
491 void DefNewGeneration::space_iterate(SpaceClosure* blk,
492 bool usedOnly) {
493 blk->do_space(eden());
494 blk->do_space(from());
495 blk->do_space(to());
496 }
497
498 // The last collection bailed out; we are running out of heap space,
499 // so we try to allocate from the from-space, too.
500 HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
501 bool should_try_alloc = should_allocate_from_space() || GC_locker::is_active_and_needs_gc();
502
503 // If the Heap_lock is not locked by this thread, this will be called
504 // again later with the Heap_lock held.
505 bool do_alloc = should_try_alloc && (Heap_lock->owned_by_self() || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()));
506
507 HeapWord* result = NULL;
508 if (do_alloc) {
509 result = from()->allocate(size);
510 }
511
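    // A single trace message reports every outcome of this call; enable it
    // with -Xlog:gc+alloc=trace.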
512 log_trace(gc, alloc)("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "): will_fail: %s heap_lock: %s free: " SIZE_FORMAT "%s%s returns %s",
513 size,
514 GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
515 "true" : "false",
516 Heap_lock->is_locked() ? "locked" : "unlocked",
517 from()->free(),
518 should_try_alloc ? "" : " should_allocate_from_space: NOT",
519 (should_try_alloc && !do_alloc) ? " Heap_lock is not owned by self" : "",
520 result == NULL ? "NULL" : "object");
521
522 return result;
523 }
524
525 HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
526 bool is_tlab,
527 bool parallel) {
528 // We don't attempt to expand the young generation (but perhaps we should).
529 return allocate(size, is_tlab);
530 }
531
532 void DefNewGeneration::adjust_desired_tenuring_threshold() {
533 // Set the desired survivor size to half the real survivor space (the "half" reflects the default TargetSurvivorRatio of 50).
534 GCPolicyCounters* gc_counters = GenCollectedHeap::heap()->collector_policy()->counters();
535 _tenuring_threshold =
536 age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize, gc_counters);
537 }
538
539 void DefNewGeneration::collect(bool full,
540 bool clear_all_soft_refs,
541 size_t size,
542 bool is_tlab) {
543 assert(full || size > 0, "otherwise we don't want to collect");
544
545 GenCollectedHeap* gch = GenCollectedHeap::heap();
546
547 _gc_timer->register_gc_start();
548 DefNewTracer gc_tracer;
549 gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
550
551 _old_gen = gch->old_gen();
552
553 // If the next generation is too full to accommodate promotion
554 // from this generation, pass on collection; let the next generation
555 // do it.
556 if (!collection_attempt_is_safe()) {
557 log_trace(gc)(":: Collection attempt not safe ::");
558 gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
559 return;
560 }
561 assert(to()->is_empty(), "Else not collection_attempt_is_safe");
562
563 init_assuming_no_promotion_failure();
564
565 GCTraceTime(Trace, gc) tm("DefNew", NULL, gch->gc_cause());
566
567 gch->trace_heap_before_gc(&gc_tracer);
568
569 // These can be shared for all code paths
570 IsAliveClosure is_alive(this);
571 ScanWeakRefClosure scan_weak_ref(this);
572
573 age_table()->clear();
574 to()->clear(SpaceDecorator::Mangle);
575
576 gch->rem_set()->prepare_for_younger_refs_iterate(false);
577
578 assert(gch->no_allocs_since_save_marks(),
579 "save marks have not been newly set.");
580
581 // Not very pretty.
582 CollectorPolicy* cp = gch->collector_policy();
583
584 FastScanClosure fsc_with_no_gc_barrier(this, false);
585 FastScanClosure fsc_with_gc_barrier(this, true);
640 // here so that unzapped regions don't get distributed to
641 // other spaces.
642 to()->mangle_unused_area();
643 }
644 swap_spaces();
645
646 assert(to()->is_empty(), "to space should be empty now");
647
648 adjust_desired_tenuring_threshold();
649
650 // A successful scavenge should restart the GC time limit count, which is
651 // used for full GCs.
652 AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
653 size_policy->reset_gc_overhead_limit_count();
654 assert(!gch->incremental_collection_failed(), "Should be clear");
655 } else {
656 assert(_promo_failure_scan_stack.is_empty(), "post condition");
657 _promo_failure_scan_stack.clear(true); // Clear cached segments.
658
659 remove_forwarding_pointers();
660 log_debug(gc)("Promotion failed");
661 // Add to-space to the list of spaces to compact
662 // when a promotion failure has occurred. In that
663 // case there can be live objects in to-space
664 // as a result of a partial evacuation of eden
665 // and from-space.
666 swap_spaces(); // For uniformity wrt ParNewGeneration.
667 from()->set_next_compaction_space(to());
668 gch->set_incremental_collection_failed();
669
670 // Inform the next generation that a promotion failure occurred.
671 _old_gen->promotion_failure_occurred();
672 gc_tracer.report_promotion_failed(_promotion_failed_info);
673
674 // Reset the PromotionFailureALot counters.
675 NOT_PRODUCT(gch->reset_promotion_should_fail();)
676 }
677 // set new iteration safe limit for the survivor spaces
678 from()->set_concurrent_iteration_safe_limit(from()->top());
679 to()->set_concurrent_iteration_safe_limit(to()->top());
680
681 // We need to use a monotonically non-decreasing time in ms,
682 // or we will see time-warp warnings; os::javaTimeMillis()
683 // does not guarantee monotonicity, so derive the time from javaTimeNanos().
684 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
685 update_time_of_last_gc(now);
686
687 gch->trace_heap_after_gc(&gc_tracer);
688
689 _gc_timer->register_gc_end();
690
691 gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
692 }
693
694 class RemoveForwardPointerClosure: public ObjectClosure {
695 public:
696 void do_object(oop obj) {
718 obj->set_mark(m);
719 }
720 _objs_with_preserved_marks.clear(true);
721 _preserved_marks_of_objs.clear(true);
722 }
723
724 void DefNewGeneration::preserve_mark(oop obj, markOop m) {
725 assert(_promotion_failed && m->must_be_preserved_for_promotion_failure(obj),
726 "Oversaving!");
727 _objs_with_preserved_marks.push(obj);
728 _preserved_marks_of_objs.push(m);
729 }
730
731 void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
732 if (m->must_be_preserved_for_promotion_failure(obj)) {
733 preserve_mark(obj, m);
734 }
735 }
736
737 void DefNewGeneration::handle_promotion_failure(oop old) {
738 log_debug(gc, promotion)("Promotion failure size = %d", old->size());
739
740 _promotion_failed = true;
741 _promotion_failed_info.register_copy_failure(old->size());
742 preserve_mark_if_necessary(old, old->mark());
743 // forward to self
744 old->forward_to(old);
745
746 _promo_failure_scan_stack.push(old);
747
748 if (!_promo_failure_drain_in_progress) {
749 // prevent recursion in copy_to_survivor_space()
750 _promo_failure_drain_in_progress = true;
751 drain_promo_failure_scan_stack();
752 _promo_failure_drain_in_progress = false;
753 }
754 }
755
756 oop DefNewGeneration::copy_to_survivor_space(oop old) {
757 assert(is_in_reserved(old) && !old->is_forwarded(),
758 "shouldn't be scavenging this oop");
759 size_t s = old->size();
851 size_t free_words = pointer_delta(to_space->end(), to_space->top());
852 if (free_words >= MinFreeScratchWords) {
853 ScratchBlock* sb = (ScratchBlock*)to_space->top();
854 sb->num_words = free_words;
855 sb->next = list;
856 list = sb;
857 }
858 }
859
860 void DefNewGeneration::reset_scratch() {
861 // If to_space was contributed as scratch space, mangle all of
862 // it when ZapUnusedHeapArea is set. This is needed because
863 // top is not maintained while to-space is used as scratch.
864 if (ZapUnusedHeapArea) {
865 to()->mangle_unused_area_complete();
866 }
867 }
868
869 bool DefNewGeneration::collection_attempt_is_safe() {
870 if (!to()->is_empty()) {
871 log_trace(gc)(":: to is not empty ::");
872 return false;
873 }
874 if (_old_gen == NULL) {
875 GenCollectedHeap* gch = GenCollectedHeap::heap();
876 _old_gen = gch->old_gen();
877 }
878 return _old_gen->promotion_attempt_is_safe(used());
879 }
880
881 void DefNewGeneration::gc_epilogue(bool full) {
882 DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)
883
884 assert(!GC_locker::is_active(), "We should not be executing here");
885 // Check if the heap is approaching full after a collection has
886 // been done. Generally the young generation is, at a minimum,
887 // empty at the end of a collection. If it is not, then
888 // the heap is approaching full.
889 GenCollectedHeap* gch = GenCollectedHeap::heap();
890 if (full) {
891 DEBUG_ONLY(seen_incremental_collection_failed = false;)
892 if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
893 log_trace(gc)("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
894 GCCause::to_string(gch->gc_cause()));
895 gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
896 set_should_allocate_from_space(); // we seem to be running out of space
897 } else {
898 log_trace(gc)("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
899 GCCause::to_string(gch->gc_cause()));
900 gch->clear_incremental_collection_failed(); // We just did a full collection
901 clear_should_allocate_from_space(); // if set
902 }
903 } else {
904 #ifdef ASSERT
905 // It is possible that incremental_collection_failed() == true
906 // here, because an attempted scavenge did not succeed. The policy
907 // is normally expected to cause a full collection which should
908 // clear that condition, so we should not be here twice in a row
909 // with incremental_collection_failed() == true without having done
910 // a full collection in between.
911 if (!seen_incremental_collection_failed &&
912 gch->incremental_collection_failed()) {
913 log_trace(gc)("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
914 GCCause::to_string(gch->gc_cause()));
915 seen_incremental_collection_failed = true;
916 } else if (seen_incremental_collection_failed) {
917 log_trace(gc)("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
918 GCCause::to_string(gch->gc_cause()));
919 assert(gch->gc_cause() == GCCause::_scavenge_alot ||
920 (GCCause::is_user_requested_gc(gch->gc_cause()) && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
921 !gch->incremental_collection_failed(),
922 "Twice in a row");
923 seen_incremental_collection_failed = false;
924 }
925 #endif // ASSERT
926 }
927
928 if (ZapUnusedHeapArea) {
929 eden()->check_mangled_unused_area_complete();
930 from()->check_mangled_unused_area_complete();
931 to()->check_mangled_unused_area_complete();
932 }
933
934 if (!CleanChunkPoolAsync) {
935 Chunk::clean_chunk_pool();
936 }
937
938 // update the generation and space performance counters