367 "expanded heap instead");
368 }
369 }
370
371 return success;
372 }
373
void DefNewGeneration::compute_new_size() {
  // This is called after a GC that includes the old generation, so from-space
  // will normally be empty. We check both survivor spaces, because a failed
  // scavenge swaps their roles. If either space is non-empty we bail out,
  // since resizing would require relocating the live objects.
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  size_t old_size = gch->old_gen()->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = spec()->init_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  // Compute the desired new generation size based on NewRatio and
  // NewSizeThreadIncrease.
  size_t desired_new_size = old_size / NewRatio;
  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
  desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);
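  // Worked example with hypothetical numbers: if old_size = 512M,
  // NewRatio = 2, there are 10 non-daemon threads, and
  // NewSizeThreadIncrease = 16K, this computes
  //   desired_new_size = align_size_up(256M + 160K, GenGrain)
  // i.e. roughly a quarter of a gigabyte, rounded up to a GenGrain boundary.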

  // Clamp the desired size to [min_new_size, max_new_size].
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");
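  // The assert holds because the earlier assert established
  // min_new_size <= new_size_before <= max_new_size: MIN2 first bounds the
  // value above by max_new_size, and MAX2 can only raise it to min_new_size,
  // never past max_new_size.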

  bool changed = false;
  if (desired_new_size > new_size_before) {
// ...
// (The rest of compute_new_size() is not shown; the excerpt resumes inside
// DefNewGeneration::collect().)

  KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
                                      gch->rem_set()->klass_rem_set());
  CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
                                           &fsc_with_no_gc_barrier,
                                           false);
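  // Note (editorial assumption): the third constructor argument is
  // must_claim_cld; false should be safe here because the serial scavenge
  // runs single-threaded, so CLDs need not be claimed to prevent
  // double-processing by competing workers.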

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(),
         "no allocations may have occurred since the save marks were set");
  {
    // DefNew needs to run with n_threads == 0, to make sure the serial
    // version of the card table scanning code is used.
    // See: CardTableModRefBSForCTRS::non_clean_card_iterate_possibly_parallel.
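    // StrongRootsScope's argument is the number of parallel worker threads;
    // passing 0 selects the serial code paths throughout root processing.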
    StrongRootsScope srs(0);

    gch->gen_process_roots(&srs,
                           GenCollectedHeap::YoungGen,
                           true,  // Process younger gens, if any,
                                  // as strong roots.
                           GenCollectedHeap::SO_ScavengeCodeCache,
                           GenCollectedHeap::StrongAndWeakRoots,
                           &fsc_with_no_gc_barrier,
                           &fsc_with_gc_barrier,
                           &cld_scan_closure);
  }
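
  // At this point the strong roots (thread stacks, VM-internal roots, the
  // scavengable code cache, and old-to-young pointers found via dirty cards
  // in the remembered set) have been scanned, and every directly reachable
  // young object has been copied to to-space or promoted.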

  // "Evacuate followers": a Cheney-style breadth-first scan of the objects
  // copied so far; newly discovered live objects are copied and scanned in
  // turn until no unscanned survivors remain.
  evacuate_followers.do_void();

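  // Process references discovered during the scavenge: surviving referents
  // are kept alive via keep_alive, and anything newly copied as a result is
  // scanned with evacuate_followers before the spaces are swapped below.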
  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  const ReferenceProcessorStats& stats =
    rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                      NULL, _gc_timer, gc_tracer.gc_id());
  gc_tracer.report_gc_reference_stats(stats);

  if (!_promotion_failed) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s). When a young collection fails to collect sufficient
      // space, resizing of the young generation can occur and redistribute
      // the spaces in the young generation. Mangle here so that unzapped
      // regions don't get distributed to other spaces.
      to()->mangle_unused_area();
    }
367 "expanded heap instead");
368 }
369 }
370
371 return success;
372 }
373
374 void DefNewGeneration::compute_new_size() {
375 // This is called after a GC that includes the old generation, so from-space
376 // will normally be empty.
377 // Note that we check both spaces, since if scavenge failed they revert roles.
378 // If not we bail out (otherwise we would have to relocate the objects).
379 if (!from()->is_empty() || !to()->is_empty()) {
380 return;
381 }
382
383 GenCollectedHeap* gch = GenCollectedHeap::heap();
384
385 size_t old_size = gch->old_gen()->capacity();
386 size_t new_size_before = _virtual_space.committed_size();
387 size_t min_new_size = initial_size();
388 size_t max_new_size = reserved().byte_size();
389 assert(min_new_size <= new_size_before &&
390 new_size_before <= max_new_size,
391 "just checking");
392 // All space sizes must be multiples of Generation::GenGrain.
393 size_t alignment = Generation::GenGrain;
394
395 // Compute desired new generation size based on NewRatio and
396 // NewSizeThreadIncrease
397 size_t desired_new_size = old_size/NewRatio;
398 int threads_count = Threads::number_of_non_daemon_threads();
399 size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
400 desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);
401
402 // Adjust new generation size
403 desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
404 assert(desired_new_size <= max_new_size, "just checking");
405
406 bool changed = false;
407 if (desired_new_size > new_size_before) {
610 KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
611 gch->rem_set()->klass_rem_set());
612 CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
613 &fsc_with_no_gc_barrier,
614 false);
615
616 set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
617 FastEvacuateFollowersClosure evacuate_followers(gch,
618 &fsc_with_no_gc_barrier,
619 &fsc_with_gc_barrier);
620
621 assert(gch->no_allocs_since_save_marks(),
622 "save marks have not been newly set.");
623
624 {
625 // DefNew needs to run with n_threads == 0, to make sure the serial
626 // version of the card table scanning code is used.
627 // See: CardTableModRefBSForCTRS::non_clean_card_iterate_possibly_parallel.
628 StrongRootsScope srs(0);
629
630 gch->young_process_roots(&srs,
631 &fsc_with_no_gc_barrier,
632 &fsc_with_gc_barrier,
633 &cld_scan_closure);
634 }
635
636 // "evacuate followers".
637 evacuate_followers.do_void();
638
639 FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
640 ReferenceProcessor* rp = ref_processor();
641 rp->setup_policy(clear_all_soft_refs);
642 const ReferenceProcessorStats& stats =
643 rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
644 NULL, _gc_timer, gc_tracer.gc_id());
645 gc_tracer.report_gc_reference_stats(stats);
646
647 if (!_promotion_failed) {
648 // Swap the survivor spaces.
649 eden()->clear(SpaceDecorator::Mangle);
650 from()->clear(SpaceDecorator::Mangle);
651 if (ZapUnusedHeapArea) {
652 // This is now done here because of the piece-meal mangling which
653 // can check for valid mangling at intermediate points in the
|