
src/share/vm/gc/cms/parNewGeneration.cpp

rev 13233 : [mq]: 8179387.patch


   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"

  26 #include "gc/cms/compactibleFreeListSpace.hpp"
  27 #include "gc/cms/concurrentMarkSweepGeneration.hpp"
  28 #include "gc/cms/parNewGeneration.inline.hpp"
  29 #include "gc/cms/parOopClosures.inline.hpp"
  30 #include "gc/serial/defNewGeneration.inline.hpp"
  31 #include "gc/shared/adaptiveSizePolicy.hpp"
  32 #include "gc/shared/ageTable.inline.hpp"
  33 #include "gc/shared/copyFailedInfo.hpp"
  34 #include "gc/shared/gcHeapSummary.hpp"
  35 #include "gc/shared/gcTimer.hpp"
  36 #include "gc/shared/gcTrace.hpp"
  37 #include "gc/shared/gcTraceTime.inline.hpp"
  38 #include "gc/shared/genCollectedHeap.hpp"
  39 #include "gc/shared/genOopClosures.inline.hpp"
  40 #include "gc/shared/generation.hpp"
  41 #include "gc/shared/plab.inline.hpp"
  42 #include "gc/shared/preservedMarks.inline.hpp"
  43 #include "gc/shared/referencePolicy.hpp"
  44 #include "gc/shared/space.hpp"
  45 #include "gc/shared/spaceDecorator.hpp"


 787              par_scan_state.keep_alive_closure(),
 788              par_scan_state.evacuate_followers_closure());
 789 }
 790 
 791 class ParNewRefEnqueueTaskProxy: public AbstractGangTask {
 792   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
 793   EnqueueTask& _task;
 794 
 795 public:
 796   ParNewRefEnqueueTaskProxy(EnqueueTask& task)
 797     : AbstractGangTask("ParNewGeneration parallel reference enqueue"),
 798       _task(task)
 799   { }
 800 
 801   virtual void work(uint worker_id) {
 802     _task.work(worker_id);
 803   }
 804 };
 805 
 806 void ParNewRefProcTaskExecutor::execute(ProcessTask& task) {
 807   GenCollectedHeap* gch = GenCollectedHeap::heap();
 808   WorkGang* workers = gch->workers();
 809   assert(workers != NULL, "Need parallel worker threads.");
 810   _state_set.reset(workers->active_workers(), _young_gen.promotion_failed());
 811   ParNewRefProcTaskProxy rp_task(task, _young_gen, _old_gen,
 812                                  _young_gen.reserved().end(), _state_set);
 813   workers->run_task(&rp_task);
 814   _state_set.reset(0 /* bad value in debug if not reset */,
 815                    _young_gen.promotion_failed());
 816 }
 817 
 818 void ParNewRefProcTaskExecutor::execute(EnqueueTask& task) {
 819   GenCollectedHeap* gch = GenCollectedHeap::heap();
 820   WorkGang* workers = gch->workers();
 821   assert(workers != NULL, "Need parallel worker threads.");
 822   ParNewRefEnqueueTaskProxy enq_task(task);
 823   workers->run_task(&enq_task);
 824 }
 825 
 826 void ParNewRefProcTaskExecutor::set_single_threaded_mode() {
 827   _state_set.flush();
 828   GenCollectedHeap* gch = GenCollectedHeap::heap();
 829   gch->save_marks();
 830 }
 831 
 832 ScanClosureWithParBarrier::
 833 ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
 834   ScanClosure(g, gc_barrier)
 835 { }
 836 
 837 EvacuateFollowersClosureGeneral::
 838 EvacuateFollowersClosureGeneral(GenCollectedHeap* gch,
 839                                 OopsInGenClosure* cur,


 866   gch->set_incremental_collection_failed();
 867   // Inform the next generation that a promotion failure occurred.
 868   _old_gen->promotion_failure_occurred();
 869 
 870   // Trace promotion failure in the parallel GC threads
 871   thread_state_set.trace_promotion_failed(gc_tracer());
 872   // Single threaded code may have reported promotion failure to the global state
 873   if (_promotion_failed_info.has_failed()) {
 874     _gc_tracer.report_promotion_failed(_promotion_failed_info);
 875   }
 876   // Reset the PromotionFailureALot counters.
 877   NOT_PRODUCT(gch->reset_promotion_should_fail();)
 878 }
 879 
 880 void ParNewGeneration::collect(bool   full,
 881                                bool   clear_all_soft_refs,
 882                                size_t size,
 883                                bool   is_tlab) {
 884   assert(full || size > 0, "otherwise we don't want to collect");
 885 
 886   GenCollectedHeap* gch = GenCollectedHeap::heap();
 887 
 888   _gc_timer->register_gc_start();
 889 
 890   AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
 891   WorkGang* workers = gch->workers();
 892   assert(workers != NULL, "Need workgang for parallel work");
 893   uint active_workers =
 894        AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
 895                                                workers->active_workers(),
 896                                                Threads::number_of_non_daemon_threads());
 897   active_workers = workers->update_active_workers(active_workers);
 898   log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers->total_workers());
 899 
 900   _old_gen = gch->old_gen();
 901 
 902   // If the next generation is too full to accommodate worst-case promotion
 903   // from this generation, pass on collection; let the next generation
 904   // do it.
 905   if (!collection_attempt_is_safe()) {
 906     gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one


1043   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
1044   update_time_of_last_gc(now);
1045 
1046   rp->set_enqueuing_is_done(true);
1047   if (rp->processing_is_mt()) {
1048     ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
1049     rp->enqueue_discovered_references(&task_executor);
1050   } else {
1051     rp->enqueue_discovered_references(NULL);
1052   }
1053   rp->verify_no_references_recorded();
1054 
1055   gch->trace_heap_after_gc(gc_tracer());
1056 
1057   _gc_timer->register_gc_end();
1058 
1059   _gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
1060 }
1061 
1062 size_t ParNewGeneration::desired_plab_sz() {
1063   return _plab_stats.desired_plab_sz(GenCollectedHeap::heap()->workers()->active_workers());
1064 }
1065 
1066 static int sum;
1067 void ParNewGeneration::waste_some_time() {
1068   for (int i = 0; i < 100; i++) {
1069     sum += i;
1070   }
1071 }
1072 
1073 static const oop ClaimedForwardPtr = cast_to_oop<intptr_t>(0x4);
1074 
1075 // Because of concurrency, there are times where an object for which
1076 // "is_forwarded()" is true contains an "interim" forwarding pointer
1077 // value.  Such a value will soon be overwritten with a real value.
1078 // This method requires "obj" to have a forwarding pointer, and waits, if
1079 // necessary for a real one to be inserted, and returns it.
1080 
1081 oop ParNewGeneration::real_forwardee(oop obj) {
1082   oop forward_ptr = obj->forwardee();
1083   if (forward_ptr != ClaimedForwardPtr) {


1454 }
1455 #undef BUSY
1456 
1457 void ParNewGeneration::ref_processor_init() {
1458   if (_ref_processor == NULL) {
1459     // Allocate and initialize a reference processor
1460     _ref_processor =
1461       new ReferenceProcessor(_reserved,                  // span
1462                              ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
1463                              ParallelGCThreads,          // mt processing degree
1464                              refs_discovery_is_mt(),     // mt discovery
1465                              ParallelGCThreads,          // mt discovery degree
1466                              refs_discovery_is_atomic(), // atomic_discovery
1467                              NULL);                      // is_alive_non_header
1468   }
1469 }
1470 
1471 const char* ParNewGeneration::name() const {
1472   return "par new generation";
1473 }








   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/cms/cmsHeap.hpp"
  27 #include "gc/cms/compactibleFreeListSpace.hpp"
  28 #include "gc/cms/concurrentMarkSweepGeneration.hpp"
  29 #include "gc/cms/parNewGeneration.inline.hpp"
  30 #include "gc/cms/parOopClosures.inline.hpp"
  31 #include "gc/serial/defNewGeneration.inline.hpp"
  32 #include "gc/shared/adaptiveSizePolicy.hpp"
  33 #include "gc/shared/ageTable.inline.hpp"
  34 #include "gc/shared/copyFailedInfo.hpp"
  35 #include "gc/shared/gcHeapSummary.hpp"
  36 #include "gc/shared/gcTimer.hpp"
  37 #include "gc/shared/gcTrace.hpp"
  38 #include "gc/shared/gcTraceTime.inline.hpp"
  39 #include "gc/shared/genCollectedHeap.hpp"
  40 #include "gc/shared/genOopClosures.inline.hpp"
  41 #include "gc/shared/generation.hpp"
  42 #include "gc/shared/plab.inline.hpp"
  43 #include "gc/shared/preservedMarks.inline.hpp"
  44 #include "gc/shared/referencePolicy.hpp"
  45 #include "gc/shared/space.hpp"
  46 #include "gc/shared/spaceDecorator.hpp"


 788              par_scan_state.keep_alive_closure(),
 789              par_scan_state.evacuate_followers_closure());
 790 }
 791 
 792 class ParNewRefEnqueueTaskProxy: public AbstractGangTask {
 793   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
 794   EnqueueTask& _task;
 795 
 796 public:
 797   ParNewRefEnqueueTaskProxy(EnqueueTask& task)
 798     : AbstractGangTask("ParNewGeneration parallel reference enqueue"),
 799       _task(task)
 800   { }
 801 
 802   virtual void work(uint worker_id) {
 803     _task.work(worker_id);
 804   }
 805 };
 806 
 807 void ParNewRefProcTaskExecutor::execute(ProcessTask& task) {
 808   CMSHeap* gch = CMSHeap::heap();
 809   WorkGang* workers = gch->workers();
 810   assert(workers != NULL, "Need parallel worker threads.");
 811   _state_set.reset(workers->active_workers(), _young_gen.promotion_failed());
 812   ParNewRefProcTaskProxy rp_task(task, _young_gen, _old_gen,
 813                                  _young_gen.reserved().end(), _state_set);
 814   workers->run_task(&rp_task);
 815   _state_set.reset(0 /* bad value in debug if not reset */,
 816                    _young_gen.promotion_failed());
 817 }
 818 
 819 void ParNewRefProcTaskExecutor::execute(EnqueueTask& task) {
 820   CMSHeap* gch = CMSHeap::heap();
 821   WorkGang* workers = gch->workers();
 822   assert(workers != NULL, "Need parallel worker threads.");
 823   ParNewRefEnqueueTaskProxy enq_task(task);
 824   workers->run_task(&enq_task);
 825 }
 826 
 827 void ParNewRefProcTaskExecutor::set_single_threaded_mode() {
 828   _state_set.flush();
 829   GenCollectedHeap* gch = GenCollectedHeap::heap();
 830   gch->save_marks();
 831 }
 832 
 833 ScanClosureWithParBarrier::
 834 ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
 835   ScanClosure(g, gc_barrier)
 836 { }
 837 
 838 EvacuateFollowersClosureGeneral::
 839 EvacuateFollowersClosureGeneral(GenCollectedHeap* gch,
 840                                 OopsInGenClosure* cur,
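
A side note on the proxy shape used by ParNewRefEnqueueTaskProxy and ParNewRefProcTaskExecutor above (not part of the patch): the executor wraps a per-worker reference task in an AbstractGangTask so the WorkGang can hand each worker its worker_id, and every worker forwards to the same underlying task object. A minimal standalone C++ sketch of that pattern, using std::thread as a stand-in for the work gang and hypothetical EnqueueTask/GangTask stand-in types:

// Standalone sketch (not HotSpot code) of the proxy pattern above: an
// executor-specific task is wrapped in a gang task so that each pooled
// worker calls work(worker_id) on it.
#include <cstdio>
#include <thread>
#include <vector>

struct EnqueueTask {                       // stand-in for AbstractRefProcTaskExecutor::EnqueueTask
  void work(unsigned worker_id) { std::printf("worker %u enqueues its share\n", worker_id); }
};

class GangTask {                           // stand-in for AbstractGangTask
public:
  virtual ~GangTask() {}
  virtual void work(unsigned worker_id) = 0;
};

class EnqueueTaskProxy : public GangTask { // mirrors ParNewRefEnqueueTaskProxy
  EnqueueTask& _task;
public:
  explicit EnqueueTaskProxy(EnqueueTask& task) : _task(task) {}
  void work(unsigned worker_id) override { _task.work(worker_id); }
};

static void run_task(GangTask* task, unsigned workers) { // stand-in for WorkGang::run_task
  std::vector<std::thread> gang;
  for (unsigned id = 0; id < workers; id++) {
    gang.emplace_back([task, id] { task->work(id); });
  }
  for (std::thread& t : gang) t.join();
}

int main() {
  EnqueueTask task;
  EnqueueTaskProxy proxy(task);
  run_task(&proxy, 4);                     // every worker gets the same task object
  return 0;
}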


 867   gch->set_incremental_collection_failed();
 868   // Inform the next generation that a promotion failure occurred.
 869   _old_gen->promotion_failure_occurred();
 870 
 871   // Trace promotion failure in the parallel GC threads
 872   thread_state_set.trace_promotion_failed(gc_tracer());
 873   // Single threaded code may have reported promotion failure to the global state
 874   if (_promotion_failed_info.has_failed()) {
 875     _gc_tracer.report_promotion_failed(_promotion_failed_info);
 876   }
 877   // Reset the PromotionFailureALot counters.
 878   NOT_PRODUCT(gch->reset_promotion_should_fail();)
 879 }
 880 
 881 void ParNewGeneration::collect(bool   full,
 882                                bool   clear_all_soft_refs,
 883                                size_t size,
 884                                bool   is_tlab) {
 885   assert(full || size > 0, "otherwise we don't want to collect");
 886 
 887   CMSHeap* gch = CMSHeap::heap();
 888 
 889   _gc_timer->register_gc_start();
 890 
 891   AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
 892   WorkGang* workers = gch->workers();
 893   assert(workers != NULL, "Need workgang for parallel work");
 894   uint active_workers =
 895        AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
 896                                                workers->active_workers(),
 897                                                Threads::number_of_non_daemon_threads());
 898   active_workers = workers->update_active_workers(active_workers);
 899   log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers->total_workers());
 900 
 901   _old_gen = gch->old_gen();
 902 
 903   // If the next generation is too full to accommodate worst-case promotion
 904   // from this generation, pass on collection; let the next generation
 905   // do it.
 906   if (!collection_attempt_is_safe()) {
 907     gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
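
For context on the worker sizing in collect() above: AdaptiveSizePolicy::calc_active_workers is fed the gang's total size, the currently active count, and the number of non-daemon Java threads, and collect() then commits the result with update_active_workers. The sketch below is only an illustrative bounding heuristic over those same inputs, not the actual AdaptiveSizePolicy formula:

// Illustrative only: one simple way to pick an active worker count from the
// inputs that calc_active_workers receives. The real AdaptiveSizePolicy
// heuristic is more involved (it also considers UseDynamicNumberOfGCThreads
// and past behavior); this just shows the bounding any such policy must do.
#include <algorithm>
#include <cstdio>

static unsigned pick_active_workers(unsigned total_workers,
                                    unsigned current_active,
                                    unsigned non_daemon_threads) {
  // Scale with the application's thread count, but never exceed the gang
  // size and never drop below one worker.
  unsigned wanted = std::max(current_active, non_daemon_threads);
  return std::max(1u, std::min(total_workers, wanted));
}

int main() {
  std::printf("%u\n", pick_active_workers(8, 4, 6));   // -> 6
  std::printf("%u\n", pick_active_workers(8, 4, 32));  // -> 8 (capped at gang size)
  return 0;
}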


1044   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
1045   update_time_of_last_gc(now);
1046 
1047   rp->set_enqueuing_is_done(true);
1048   if (rp->processing_is_mt()) {
1049     ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
1050     rp->enqueue_discovered_references(&task_executor);
1051   } else {
1052     rp->enqueue_discovered_references(NULL);
1053   }
1054   rp->verify_no_references_recorded();
1055 
1056   gch->trace_heap_after_gc(gc_tracer());
1057 
1058   _gc_timer->register_gc_end();
1059 
1060   _gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
1061 }
1062 
1063 size_t ParNewGeneration::desired_plab_sz() {
1064   return _plab_stats.desired_plab_sz(CMSHeap::heap()->workers()->active_workers());
1065 }
1066 
1067 static int sum;
1068 void ParNewGeneration::waste_some_time() {
1069   for (int i = 0; i < 100; i++) {
1070     sum += i;
1071   }
1072 }
1073 
1074 static const oop ClaimedForwardPtr = cast_to_oop<intptr_t>(0x4);
1075 
1076 // Because of concurrency, there are times where an object for which
1077 // "is_forwarded()" is true contains an "interim" forwarding pointer
1078 // value.  Such a value will soon be overwritten with a real value.
1079 // This method requires "obj" to have a forwarding pointer, and waits, if
1080 // necessary for a real one to be inserted, and returns it.
1081 
1082 oop ParNewGeneration::real_forwardee(oop obj) {
1083   oop forward_ptr = obj->forwardee();
1084   if (forward_ptr != ClaimedForwardPtr) {
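
The comment above about interim forwarding pointers is the key to real_forwardee: a copying thread first claims the object by installing the sentinel ClaimedForwardPtr, and only later publishes the real address, so a reader that sees the sentinel has to wait for the real value. A standalone analogue of that claim/publish/spin protocol, using std::atomic in place of the mark word (illustration only, not HotSpot code):

// Standalone analogue of the ClaimedForwardPtr protocol: one thread claims an
// object by CAS-ing a sentinel into its forwarding slot, copies it, then
// publishes the real address; readers that see the sentinel spin briefly.
#include <atomic>
#include <cstdio>
#include <thread>

static int the_copy = 42;                                 // stands in for the promoted copy
static int* const CLAIMED = reinterpret_cast<int*>(0x4);  // sentinel, like ClaimedForwardPtr
static std::atomic<int*> forwardee{nullptr};

static void copier() {
  int* expected = nullptr;
  if (forwardee.compare_exchange_strong(expected, CLAIMED)) {  // claim
    // ... copy the object while other threads may already observe CLAIMED ...
    forwardee.store(&the_copy, std::memory_order_release);     // publish the real value
  }
}

static int* real_forwardee() {
  int* p = forwardee.load(std::memory_order_acquire);
  while (p == CLAIMED) {                                  // interim value: wait for the real one
    std::this_thread::yield();
    p = forwardee.load(std::memory_order_acquire);
  }
  return p;
}

int main() {
  std::thread t(copier);
  t.join();                                               // ensure a forwardee exists, as the real method requires
  std::printf("forwarded to %p (value %d)\n",
              static_cast<void*>(real_forwardee()), *real_forwardee());
  return 0;
}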


1455 }
1456 #undef BUSY
1457 
1458 void ParNewGeneration::ref_processor_init() {
1459   if (_ref_processor == NULL) {
1460     // Allocate and initialize a reference processor
1461     _ref_processor =
1462       new ReferenceProcessor(_reserved,                  // span
1463                              ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
1464                              ParallelGCThreads,          // mt processing degree
1465                              refs_discovery_is_mt(),     // mt discovery
1466                              ParallelGCThreads,          // mt discovery degree
1467                              refs_discovery_is_atomic(), // atomic_discovery
1468                              NULL);                      // is_alive_non_header
1469   }
1470 }
1471 
1472 const char* ParNewGeneration::name() const {
1473   return "par new generation";
1474 }
1475 
1476 void ParNewGeneration::restore_preserved_marks() {
1477   SharedRestorePreservedMarksTaskExecutor task_executor(CMSHeap::heap()->workers());
1478   _preserved_marks_set.restore(&task_executor);
1479 }
1480 
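The new restore_preserved_marks() delegates the restore to a SharedRestorePreservedMarksTaskExecutor built on the CMSHeap work gang rather than restoring inline. The underlying bookkeeping: when promotion fails, object headers that are about to be clobbered by forwarding pointers are saved as (slot, original mark) pairs and written back after the collection. A minimal standalone sketch of that save/restore idea (PreservedMarks here is an illustrative stand-in, not HotSpot's PreservedMarksSet):

// Illustrative bookkeeping for preserved marks: save (slot, original value)
// pairs before overwriting the slots, then restore them afterwards.
#include <cstdio>
#include <utility>
#include <vector>

struct PreservedMarks {
  std::vector<std::pair<int*, int>> _saved;        // (header slot, original mark)

  void push_if_necessary(int* slot) { _saved.emplace_back(slot, *slot); }

  void restore() {                                 // what the task executor fans out per stack
    for (const auto& entry : _saved) *entry.first = entry.second;
    _saved.clear();
  }
};

int main() {
  int header = 7;                 // pretend this is an object's mark word
  PreservedMarks marks;
  marks.push_if_necessary(&header);
  header = -1;                    // overwritten, e.g. by a self-forwarding pointer
  marks.restore();                // puts the original mark back
  std::printf("restored mark = %d\n", header);
  return 0;
}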