881   NOT_PRODUCT(gch->reset_promotion_should_fail();)
882 }
883
884 void ParNewGeneration::collect(bool full,
885                                bool clear_all_soft_refs,
886                                size_t size,
887                                bool is_tlab) {
888   assert(full || size > 0, "otherwise we don't want to collect");
889
890   GenCollectedHeap* gch = GenCollectedHeap::heap();
891
892   _gc_timer->register_gc_start();
893
894   AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
895   WorkGang* workers = gch->workers();
896   assert(workers != NULL, "Need workgang for parallel work");
897   uint active_workers =
898       AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
899                                               workers->active_workers(),
900                                               Threads::number_of_non_daemon_threads());
901   workers->set_active_workers(active_workers);
902   _old_gen = gch->old_gen();
903
904   // If the next generation is too full to accommodate worst-case promotion
905   // from this generation, pass on collection; let the next generation
906   // do it.
907   if (!collection_attempt_is_safe()) {
908     gch->set_incremental_collection_failed(); // slight lie, in that we did not even attempt one
909     return;
910   }
911   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
912
913   _gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
914   gch->trace_heap_before_gc(gc_tracer());
915
916   init_assuming_no_promotion_failure();
917
918   if (UseAdaptiveSizePolicy) {
919     set_survivor_overflow(false);
920     size_policy->minor_collection_begin();
921   }
935
936   // Always set the terminator for the active number of workers
937   // because only those workers go through the termination protocol.
938   ParallelTaskTerminator _term(active_workers, task_queues());
939   ParScanThreadStateSet thread_state_set(active_workers,
940                                          *to(), *this, *_old_gen, *task_queues(),
941                                          _overflow_stacks, _preserved_marks_set,
942                                          desired_plab_sz(), _term);
943
944   thread_state_set.reset(active_workers, promotion_failed());
945
946   {
947     StrongRootsScope srs(active_workers);
948
949     ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set, &srs);
950     gch->rem_set()->prepare_for_younger_refs_iterate(true);
951     // It turns out that even when we're using 1 thread, doing the work in a
952     // separate thread causes wide variance in run times. We can't help this
953     // in the multi-threaded case, but we special-case n=1 here to get
954     // repeatable measurements of the 1-thread overhead of the parallel code.
955     if (active_workers > 1) {
956       workers->run_task(&tsk);
957     } else {
958       tsk.work(0);
959     }
960   }
961
962   thread_state_set.reset(0 /* Bad value in debug if not reset */,
963                          promotion_failed());
964
965   // Trace and reset failed promotion info.
966   if (promotion_failed()) {
967     thread_state_set.trace_promotion_failed(gc_tracer());
968   }
969
970   // Process (weak) reference objects found during scavenge.
971   ReferenceProcessor* rp = ref_processor();
972   IsAliveClosure is_alive(this);
973   ScanWeakRefClosure scan_weak_ref(this);
974   KeepAliveClosure keep_alive(&scan_weak_ref);
975   ScanClosure scan_without_gc_barrier(this, false);
881   NOT_PRODUCT(gch->reset_promotion_should_fail();)
882 }
883
884 void ParNewGeneration::collect(bool full,
885                                bool clear_all_soft_refs,
886                                size_t size,
887                                bool is_tlab) {
888   assert(full || size > 0, "otherwise we don't want to collect");
889
890   GenCollectedHeap* gch = GenCollectedHeap::heap();
891
892   _gc_timer->register_gc_start();
893
894   AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
895   WorkGang* workers = gch->workers();
896   assert(workers != NULL, "Need workgang for parallel work");
897   uint active_workers =
898       AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
899                                               workers->active_workers(),
900                                               Threads::number_of_non_daemon_threads());
901   active_workers = workers->update_active_workers(active_workers);
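      // update_active_workers() records the request with the gang and returns the
      // count of workers that will actually run; that value sizes the terminator,
      // the per-thread scan states and the root scope set up below.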
902   _old_gen = gch->old_gen();
903
904   // If the next generation is too full to accommodate worst-case promotion
905   // from this generation, pass on collection; let the next generation
906   // do it.
907   if (!collection_attempt_is_safe()) {
908     gch->set_incremental_collection_failed(); // slight lie, in that we did not even attempt one
909     return;
910   }
911   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
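      // An empty 'to' space is part of the safety check: it is the destination
      // for this scavenge's survivors.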
912
913   _gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
914   gch->trace_heap_before_gc(gc_tracer());
915
916   init_assuming_no_promotion_failure();
917
918   if (UseAdaptiveSizePolicy) {
919     set_survivor_overflow(false);
920     size_policy->minor_collection_begin();
921   }
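      // The survivor-overflow flag is cleared here and set later if an object does
      // not fit into the 'to' space; minor_collection_begin() starts the size
      // policy's timing of this minor collection.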
935
936   // Always set the terminator for the active number of workers
937   // because only those workers go through the termination protocol.
938   ParallelTaskTerminator _term(active_workers, task_queues());
939   ParScanThreadStateSet thread_state_set(active_workers,
940                                          *to(), *this, *_old_gen, *task_queues(),
941                                          _overflow_stacks, _preserved_marks_set,
942                                          desired_plab_sz(), _term);
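      // One ParScanThreadState per worker: each holds its own work queue,
      // overflow stack and to-space PLAB, so the copying fast path needs no
      // shared locking.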
943
944   thread_state_set.reset(active_workers, promotion_failed());
945
946   {
947     StrongRootsScope srs(active_workers);
948
949     ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set, &srs);
950     gch->rem_set()->prepare_for_younger_refs_iterate(true);
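        // 'true' tells the remembered set that the younger-refs iteration will
        // be performed by parallel workers.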
951     // It turns out that even when we're using 1 thread, doing the work in a
952     // separate thread causes wide variance in run times. We can't help this
953     // in the multi-threaded case, but we special-case n=1 here to get
954     // repeatable measurements of the 1-thread overhead of the parallel code.
955     // Might multiple workers ever be used? If yes, initialization
956     // has been done such that the single threaded path should not be used.
957     if (workers->total_workers() > 1) {
958       workers->run_task(&tsk);
959     } else {
960       tsk.work(0);
961     }
962   }
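      // At this point every worker has drained its queues and passed the
      // termination protocol: everything strongly reachable from the roots has
      // been copied or promoted.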
963
964   thread_state_set.reset(0 /* Bad value in debug if not reset */,
965                          promotion_failed());
966
967   // Trace and reset failed promotion info.
968   if (promotion_failed()) {
969     thread_state_set.trace_promotion_failed(gc_tracer());
970   }
971
972   // Process (weak) reference objects found during scavenge.
973   ReferenceProcessor* rp = ref_processor();
974   IsAliveClosure is_alive(this);
975   ScanWeakRefClosure scan_weak_ref(this);
976   KeepAliveClosure keep_alive(&scan_weak_ref);
977   ScanClosure scan_without_gc_barrier(this, false);
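      // These closures are handed to the reference processor: is_alive tests
      // whether a referent was evacuated by the scavenge, and keep_alive (via
      // scan_weak_ref) copies referents that must be kept.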
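The early-return guard above hinges on collection_attempt_is_safe(): a scavenge is attempted only when the survivor 'to' space is empty and the old generation could absorb the worst case, namely promotion of every byte currently used by the young generation. The fragment below is a simplified, hypothetical model of that idea (invented types and fields, not the HotSpot implementation, which delegates the sizing question to the old generation's own accounting):

  #include <cstddef>

  // Hypothetical model of the "worst-case promotion" guard.
  struct YoungGenModel {
    size_t used_bytes;      // bytes currently in eden + 'from' space
    bool   to_space_empty;  // survivor 'to' space must be empty before a scavenge
  };

  struct OldGenModel {
    size_t free_bytes;      // bytes the old generation can still hand out
  };

  bool collection_attempt_is_safe(const YoungGenModel& young,
                                  const OldGenModel& old_gen) {
    if (!young.to_space_empty) {
      return false;  // leftovers of an earlier, failed scavenge are still parked here
    }
    // Worst case: every live byte in the young generation gets promoted.
    return young.used_bytes <= old_gen.free_bytes;
  }

When the check says no, the collector records an incremental collection failure and returns, leaving the work to a later collection, which is exactly what the early return above does.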