
src/hotspot/share/gc/cms/parNewGeneration.cpp

rev 52325 : 8213113: Dead code related to UseAdaptiveSizePolicy in ParNewGeneration
Summary: Removed dead code related to UseAdaptiveSizePolicy for CMS
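
The first listing below is the file before the change; the second is the file after it. As a condensed, hypothetical sketch of the shape of the change (simplified stand-ins for HotSpot's size policy and GC cause, not the real types; the actual hunks are the UseAdaptiveSizePolicy guards at old lines 891 and 1020 and the survivor-overflow bookkeeping at old lines 1111-1113):

#include <cstdio>

// Hypothetical stand-in for HotSpot's AdaptiveSizePolicy; only the two calls
// touched by the patch are modeled here.
struct SizePolicy {
  void minor_collection_begin() { std::puts("minor_collection_begin"); }
  void minor_collection_end(const char* cause) {
    std::printf("minor_collection_end(%s)\n", cause);
  }
};

// Real HotSpot product flag (true by default); shown here only to illustrate
// the guard being removed.
static const bool UseAdaptiveSizePolicy = true;

// Condensed shape of ParNewGeneration::collect() before the patch.
void collect_before(SizePolicy* size_policy, const char* cause) {
  if (UseAdaptiveSizePolicy) {
    // set_survivor_overflow(false);                          // dropped by the patch
    size_policy->minor_collection_begin();
  }
  // ... evacuation work ...
  if (UseAdaptiveSizePolicy) {
    size_policy->minor_collection_end(cause);
    // size_policy->avg_survived()->sample(from()->used());   // dropped by the patch
  }
}

// Condensed shape after the patch: the begin/end notifications run
// unconditionally and the flag-only bookkeeping is gone.
void collect_after(SizePolicy* size_policy, const char* cause) {
  size_policy->minor_collection_begin();
  // ... evacuation work ...
  size_policy->minor_collection_end(cause);
}

int main() {
  SizePolicy sp;
  collect_before(&sp, "Allocation Failure");
  collect_after(&sp, "Allocation Failure");
  return 0;
}

With the guard gone, size_policy->minor_collection_begin()/minor_collection_end() always run on this path and the UseAdaptiveSizePolicy flag no longer influences it; see the hunks in the listings below for the exact context.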


Old file (before the change):

 871                                                Threads::number_of_non_daemon_threads());
 872   active_workers = workers->update_active_workers(active_workers);
 873   log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers->total_workers());
 874 
 875   _old_gen = gch->old_gen();
 876 
 877   // If the next generation is too full to accommodate worst-case promotion
 878   // from this generation, pass on collection; let the next generation
 879   // do it.
 880   if (!collection_attempt_is_safe()) {
 881     gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
 882     return;
 883   }
 884   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
 885 
 886   _gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
 887   gch->trace_heap_before_gc(gc_tracer());
 888 
 889   init_assuming_no_promotion_failure();
 890 
 891   if (UseAdaptiveSizePolicy) {
 892     set_survivor_overflow(false);
 893     size_policy->minor_collection_begin();
 894   }
 895 
 896   GCTraceTime(Trace, gc, phases) t1("ParNew", NULL, gch->gc_cause());
 897 
 898   age_table()->clear();
 899   to()->clear(SpaceDecorator::Mangle);
 900 
 901   gch->save_marks();
 902 
 903   // Set the correct parallelism (number of queues) in the reference processor
 904   ref_processor()->set_active_mt_degree(active_workers);
 905 
 906   // Need to initialize the preserved marks before the ThreadStateSet c'tor.
 907   _preserved_marks_set.init(active_workers);
 908 
 909   // Always set the terminator for the active number of workers
 910   // because only those workers go through the termination protocol.
 911   ParallelTaskTerminator _term(active_workers, task_queues());
 912   ParScanThreadStateSet thread_state_set(active_workers,
 913                                          *to(), *this, *_old_gen, *task_queues(),
 914                                          _overflow_stacks, _preserved_marks_set,


1000     // A successful scavenge should restart the GC time limit count which is
1001     // for full GC's.
1002     size_policy->reset_gc_overhead_limit_count();
1003 
1004     assert(to()->is_empty(), "to space should be empty now");
1005 
1006     adjust_desired_tenuring_threshold();
1007   } else {
1008     handle_promotion_failed(gch, thread_state_set);
1009   }
1010   _preserved_marks_set.reclaim();
1011   // set new iteration safe limit for the survivor spaces
1012   from()->set_concurrent_iteration_safe_limit(from()->top());
1013   to()->set_concurrent_iteration_safe_limit(to()->top());
1014 
1015   plab_stats()->adjust_desired_plab_sz();
1016 
1017   TASKQUEUE_STATS_ONLY(thread_state_set.print_termination_stats());
1018   TASKQUEUE_STATS_ONLY(thread_state_set.print_taskqueue_stats());
1019 
1020   if (UseAdaptiveSizePolicy) {
1021     size_policy->minor_collection_end(gch->gc_cause());
1022     size_policy->avg_survived()->sample(from()->used());
1023   }
1024 
1025   // We need to use a monotonically non-decreasing time in ms
1026   // or we will see time-warp warnings, and os::javaTimeMillis()
1027   // does not guarantee monotonicity.
1028   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
1029   update_time_of_last_gc(now);
1030 
1031   rp->set_enqueuing_is_done(true);
1032   rp->verify_no_references_recorded();
1033 
1034   gch->trace_heap_after_gc(gc_tracer());
1035 
1036   _gc_timer->register_gc_end();
1037 
1038   _gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
1039 }
1040 
1041 size_t ParNewGeneration::desired_plab_sz() {
1042   return _plab_stats.desired_plab_sz(CMSHeap::heap()->workers()->active_workers());
1043 }


1091   // In the sequential version, this assert also says that the object is
1092   // not forwarded.  That might not be the case here.  It is the case that
1093   // the caller observed it to be not forwarded at some time in the past.
1094   assert(is_in_reserved(old), "shouldn't be scavenging this oop");
1095 
1096   // The sequential code read "old->age()" below.  That doesn't work here,
1097   // since the age is in the mark word, and that might be overwritten with
1098   // a forwarding pointer by a parallel thread.  So we must save the mark
1099   // word in a local and then analyze it.
1100   oopDesc dummyOld;
1101   dummyOld.set_mark_raw(m);
1102   assert(!dummyOld.is_forwarded(),
1103          "should not be called with forwarding pointer mark word.");
1104 
1105   oop new_obj = NULL;
1106   oop forward_ptr;
1107 
1108   // Try allocating obj in to-space (unless too old)
1109   if (dummyOld.age() < tenuring_threshold()) {
1110     new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
1111     if (new_obj == NULL) {
1112       set_survivor_overflow(true);
1113     }
1114   }
1115 
1116   if (new_obj == NULL) {
1117     // Either to-space is full or we decided to promote; try allocating obj tenured
1118 
1119     // Attempt to install a null forwarding pointer (atomically),
1120     // to claim the right to install the real forwarding pointer.
1121     forward_ptr = old->forward_to_atomic(ClaimedForwardPtr, m);
1122     if (forward_ptr != NULL) {
1123       // someone else beat us to it.
1124       return real_forwardee(old);
1125     }
1126 
1127     if (!_promotion_failed) {
1128       new_obj = _old_gen->par_promote(par_scan_state->thread_num(),
1129                                       old, m, sz);
1130     }
1131 
1132     if (new_obj == NULL) {
1133       // promotion failed, forward to self
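
For context on the hunk the old listing ends with (old lines 1119-1125): a thread claims the right to install the real forwarding pointer by first installing ClaimedForwardPtr atomically, and a thread that loses that race asks for the real forwardee instead. A minimal stand-alone sketch of that claim pattern using std::atomic; the names (Object, CLAIMED, claim_or_get_forwardee) are hypothetical, and the spin loop is a simplification of real_forwardee(old), not HotSpot's implementation:

#include <atomic>
#include <cassert>
#include <cstdint>
#include <cstdio>

struct Object;
// Sentinel meaning "claimed, real forwardee not yet published" -- the analogue
// of ClaimedForwardPtr in the hunk above.
static Object* const CLAIMED = reinterpret_cast<Object*>(std::uintptr_t(1));

struct Object {
  std::atomic<Object*> forwardee{nullptr};  // stands in for the mark word's forwarding state
  int payload = 0;
};

// Returns the object's final forwardee. Exactly one caller wins the claim and
// publishes its copy; every other caller ends up with the winner's copy.
Object* claim_or_get_forwardee(Object* old_obj, Object* my_copy) {
  Object* expected = nullptr;
  // Attempt to install the sentinel (atomically) to claim the right to
  // install the real forwarding pointer.
  if (old_obj->forwardee.compare_exchange_strong(expected, CLAIMED)) {
    // We won the race: publish the real forwardee.
    old_obj->forwardee.store(my_copy, std::memory_order_release);
    return my_copy;
  }
  // Someone else beat us to it: wait until the winner replaces the sentinel
  // with the real forwardee (simplified stand-in for real_forwardee(old)).
  Object* fwd;
  while ((fwd = old_obj->forwardee.load(std::memory_order_acquire)) == CLAIMED) {
    // spin
  }
  return fwd;
}

int main() {
  Object old_obj, copy_a, copy_b;
  Object* r1 = claim_or_get_forwardee(&old_obj, &copy_a);
  Object* r2 = claim_or_get_forwardee(&old_obj, &copy_b);
  assert(r1 == r2 && r1 == &copy_a);
  std::puts("both callers agree on the forwardee");
  return 0;
}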




New file (after the change):

 871                                                Threads::number_of_non_daemon_threads());
 872   active_workers = workers->update_active_workers(active_workers);
 873   log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers->total_workers());
 874 
 875   _old_gen = gch->old_gen();
 876 
 877   // If the next generation is too full to accommodate worst-case promotion
 878   // from this generation, pass on collection; let the next generation
 879   // do it.
 880   if (!collection_attempt_is_safe()) {
 881     gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
 882     return;
 883   }
 884   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
 885 
 886   _gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
 887   gch->trace_heap_before_gc(gc_tracer());
 888 
 889   init_assuming_no_promotion_failure();
 890 


 891   size_policy->minor_collection_begin();

 892 
 893   GCTraceTime(Trace, gc, phases) t1("ParNew", NULL, gch->gc_cause());
 894 
 895   age_table()->clear();
 896   to()->clear(SpaceDecorator::Mangle);
 897 
 898   gch->save_marks();
 899 
 900   // Set the correct parallelism (number of queues) in the reference processor
 901   ref_processor()->set_active_mt_degree(active_workers);
 902 
 903   // Need to initialize the preserved marks before the ThreadStateSet c'tor.
 904   _preserved_marks_set.init(active_workers);
 905 
 906   // Always set the terminator for the active number of workers
 907   // because only those workers go through the termination protocol.
 908   ParallelTaskTerminator _term(active_workers, task_queues());
 909   ParScanThreadStateSet thread_state_set(active_workers,
 910                                          *to(), *this, *_old_gen, *task_queues(),
 911                                          _overflow_stacks, _preserved_marks_set,


 997     // A successful scavenge should restart the GC time limit count which is
 998     // for full GC's.
 999     size_policy->reset_gc_overhead_limit_count();
1000 
1001     assert(to()->is_empty(), "to space should be empty now");
1002 
1003     adjust_desired_tenuring_threshold();
1004   } else {
1005     handle_promotion_failed(gch, thread_state_set);
1006   }
1007   _preserved_marks_set.reclaim();
1008   // set new iteration safe limit for the survivor spaces
1009   from()->set_concurrent_iteration_safe_limit(from()->top());
1010   to()->set_concurrent_iteration_safe_limit(to()->top());
1011 
1012   plab_stats()->adjust_desired_plab_sz();
1013 
1014   TASKQUEUE_STATS_ONLY(thread_state_set.print_termination_stats());
1015   TASKQUEUE_STATS_ONLY(thread_state_set.print_taskqueue_stats());
1016 

1017   size_policy->minor_collection_end(gch->gc_cause());


1018 
1019   // We need to use a monotonically non-decreasing time in ms
1020   // or we will see time-warp warnings, and os::javaTimeMillis()
1021   // does not guarantee monotonicity.
1022   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
1023   update_time_of_last_gc(now);
1024 
1025   rp->set_enqueuing_is_done(true);
1026   rp->verify_no_references_recorded();
1027 
1028   gch->trace_heap_after_gc(gc_tracer());
1029 
1030   _gc_timer->register_gc_end();
1031 
1032   _gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
1033 }
1034 
1035 size_t ParNewGeneration::desired_plab_sz() {
1036   return _plab_stats.desired_plab_sz(CMSHeap::heap()->workers()->active_workers());
1037 }


1085   // In the sequential version, this assert also says that the object is
1086   // not forwarded.  That might not be the case here.  It is the case that
1087   // the caller observed it to be not forwarded at some time in the past.
1088   assert(is_in_reserved(old), "shouldn't be scavenging this oop");
1089 
1090   // The sequential code read "old->age()" below.  That doesn't work here,
1091   // since the age is in the mark word, and that might be overwritten with
1092   // a forwarding pointer by a parallel thread.  So we must save the mark
1093   // word in a local and then analyze it.
1094   oopDesc dummyOld;
1095   dummyOld.set_mark_raw(m);
1096   assert(!dummyOld.is_forwarded(),
1097          "should not be called with forwarding pointer mark word.");
1098 
1099   oop new_obj = NULL;
1100   oop forward_ptr;
1101 
1102   // Try allocating obj in to-space (unless too old)
1103   if (dummyOld.age() < tenuring_threshold()) {
1104     new_obj = (oop)par_scan_state->alloc_in_to_space(sz);



1105   }
1106 
1107   if (new_obj == NULL) {
1108     // Either to-space is full or we decided to promote; try allocating obj tenured
1109 
1110     // Attempt to install a null forwarding pointer (atomically),
1111     // to claim the right to install the real forwarding pointer.
1112     forward_ptr = old->forward_to_atomic(ClaimedForwardPtr, m);
1113     if (forward_ptr != NULL) {
1114       // someone else beat us to it.
1115       return real_forwardee(old);
1116     }
1117 
1118     if (!_promotion_failed) {
1119       new_obj = _old_gen->par_promote(par_scan_state->thread_num(),
1120                                       old, m, sz);
1121     }
1122 
1123     if (new_obj == NULL) {
1124       // promotion failed, forward to self
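
The comment at new lines 1019-1023 explains why the value handed to update_time_of_last_gc() is derived from os::javaTimeNanos() divided by NANOSECS_PER_MILLISEC rather than taken from os::javaTimeMillis(): the millisecond wall clock is not guaranteed to be monotonic. A small stand-alone illustration of the same idea, using std::chrono::steady_clock as the monotonic source (illustrative names, not HotSpot code):

#include <chrono>
#include <cstdint>
#include <cstdio>

constexpr std::int64_t NANOSECS_PER_MILLISEC = 1000 * 1000;

// A monotonically non-decreasing millisecond stamp, built the same way the
// hunk above builds `now`: monotonic nanoseconds divided down to milliseconds.
std::int64_t monotonic_millis() {
  using namespace std::chrono;
  // steady_clock never goes backwards, unlike wall-clock sources such as
  // system_clock (or os::javaTimeMillis() in HotSpot).
  const std::int64_t nanos =
      duration_cast<nanoseconds>(steady_clock::now().time_since_epoch()).count();
  return nanos / NANOSECS_PER_MILLISEC;
}

int main() {
  const std::int64_t t1 = monotonic_millis();
  const std::int64_t t2 = monotonic_millis();
  std::printf("t1=%lld ms, t2=%lld ms, non-decreasing: %s\n",
              static_cast<long long>(t1), static_cast<long long>(t2),
              t2 >= t1 ? "yes" : "no");
  return 0;
}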

