898 _next_gen->promotion_failure_occurred();
899
900 // Trace promotion failure in the parallel GC threads
901 thread_state_set.trace_promotion_failed(gc_tracer);
902 // Single threaded code may have reported promotion failure to the global state
903 if (_promotion_failed_info.has_failed()) {
904 gc_tracer.report_promotion_failed(_promotion_failed_info);
905 }
906 // Reset the PromotionFailureALot counters.
907 NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
908 }
909
910 void ParNewGeneration::collect(bool full,
911 bool clear_all_soft_refs,
912 size_t size,
913 bool is_tlab) {
914 assert(full || size > 0, "otherwise we don't want to collect");
915
916 GenCollectedHeap* gch = GenCollectedHeap::heap();
917
918 _gc_timer->register_gc_start(os::elapsed_counter());
919
920 assert(gch->kind() == CollectedHeap::GenCollectedHeap,
921 "not a CMS generational heap");
922 AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
923 FlexibleWorkGang* workers = gch->workers();
924 assert(workers != NULL, "Need workgang for parallel work");
925 int active_workers =
926 AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
927 workers->active_workers(),
928 Threads::number_of_non_daemon_threads());
929 workers->set_active_workers(active_workers);
930 assert(gch->n_gens() == 2,
931 "Par collection currently only works with single older gen.");
932 _next_gen = gch->next_gen(this);
933 // Do we have to avoid promotion_undo?
934 if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) {
935 set_avoid_promotion_undo(true);
936 }
937
938 // If the next generation is too full to accommodate worst-case promotion
1074 // We need to use a monotonically non-decreasing time in ms
1075 // or we will see time-warp warnings and os::javaTimeMillis()
1076 // does not guarantee monotonicity.
1077 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
1078 update_time_of_last_gc(now);
1079
1080 SpecializationStats::print();
1081
1082 rp->set_enqueuing_is_done(true);
1083 if (rp->processing_is_mt()) {
1084 ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
1085 rp->enqueue_discovered_references(&task_executor);
1086 } else {
1087 rp->enqueue_discovered_references(NULL);
1088 }
1089 rp->verify_no_references_recorded();
1090
1091 gch->trace_heap_after_gc(&gc_tracer);
1092 gc_tracer.report_tenuring_threshold(tenuring_threshold());
1093
1094 _gc_timer->register_gc_end(os::elapsed_counter());
1095
1096 gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
1097 }
1098
1099 static int sum;
1100 void ParNewGeneration::waste_some_time() {
1101 for (int i = 0; i < 100; i++) {
1102 sum += i;
1103 }
1104 }
1105
// Sentinel "interim" forwarding-pointer value (small tagged constant, never a
// real heap address). Per the comment on real_forwardee() below, an object may
// transiently hold this value until the real forwarding pointer is installed;
// presumably it marks a copy claimed by a GC worker — confirm against callers.
static const oop ClaimedForwardPtr = cast_to_oop<intptr_t>(0x4);
1107
1108 // Because of concurrency, there are times where an object for which
1109 // "is_forwarded()" is true contains an "interim" forwarding pointer
1110 // value. Such a value will soon be overwritten with a real value.
1111 // This method requires "obj" to have a forwarding pointer, and waits, if
1112 // necessary, for a real one to be inserted, and returns it.
1113
1114 oop ParNewGeneration::real_forwardee(oop obj) {
|
898 _next_gen->promotion_failure_occurred();
899
900 // Trace promotion failure in the parallel GC threads
901 thread_state_set.trace_promotion_failed(gc_tracer);
902 // Single threaded code may have reported promotion failure to the global state
903 if (_promotion_failed_info.has_failed()) {
904 gc_tracer.report_promotion_failed(_promotion_failed_info);
905 }
906 // Reset the PromotionFailureALot counters.
907 NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
908 }
909
910 void ParNewGeneration::collect(bool full,
911 bool clear_all_soft_refs,
912 size_t size,
913 bool is_tlab) {
914 assert(full || size > 0, "otherwise we don't want to collect");
915
916 GenCollectedHeap* gch = GenCollectedHeap::heap();
917
918 _gc_timer->register_gc_start();
919
920 assert(gch->kind() == CollectedHeap::GenCollectedHeap,
921 "not a CMS generational heap");
922 AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
923 FlexibleWorkGang* workers = gch->workers();
924 assert(workers != NULL, "Need workgang for parallel work");
925 int active_workers =
926 AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
927 workers->active_workers(),
928 Threads::number_of_non_daemon_threads());
929 workers->set_active_workers(active_workers);
930 assert(gch->n_gens() == 2,
931 "Par collection currently only works with single older gen.");
932 _next_gen = gch->next_gen(this);
933 // Do we have to avoid promotion_undo?
934 if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) {
935 set_avoid_promotion_undo(true);
936 }
937
938 // If the next generation is too full to accommodate worst-case promotion
1074 // We need to use a monotonically non-decreasing time in ms
1075 // or we will see time-warp warnings and os::javaTimeMillis()
1076 // does not guarantee monotonicity.
1077 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
1078 update_time_of_last_gc(now);
1079
1080 SpecializationStats::print();
1081
1082 rp->set_enqueuing_is_done(true);
1083 if (rp->processing_is_mt()) {
1084 ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
1085 rp->enqueue_discovered_references(&task_executor);
1086 } else {
1087 rp->enqueue_discovered_references(NULL);
1088 }
1089 rp->verify_no_references_recorded();
1090
1091 gch->trace_heap_after_gc(&gc_tracer);
1092 gc_tracer.report_tenuring_threshold(tenuring_threshold());
1093
1094 _gc_timer->register_gc_end();
1095
1096 gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
1097 }
1098
1099 static int sum;
1100 void ParNewGeneration::waste_some_time() {
1101 for (int i = 0; i < 100; i++) {
1102 sum += i;
1103 }
1104 }
1105
1106 static const oop ClaimedForwardPtr = cast_to_oop<intptr_t>(0x4);
1107
1108 // Because of concurrency, there are times where an object for which
1109 // "is_forwarded()" is true contains an "interim" forwarding pointer
1110 // value. Such a value will soon be overwritten with a real value.
1111 // This method requires "obj" to have a forwarding pointer, and waits, if
1112 // necessary, for a real one to be inserted, and returns it.
1113
1114 oop ParNewGeneration::real_forwardee(oop obj) {
|