< prev index next >

src/hotspot/share/gc/g1/g1CollectedHeap.cpp

Print this page
rev 53382 : [mq]: 8213229-strings-as-weak-during-young-gc
rev 53385 : [mq]: 8217374-rename-g1evacuationinfo


2901     // This call will decide whether this pause is an initial-mark
2902     // pause. If it is, in_initial_mark_gc() will return true
2903     // for the duration of this pause.
2904     g1_policy()->decide_on_conc_mark_initiation();
2905   }
2906 
2907   // We do not allow initial-mark to be piggy-backed on a mixed GC.
2908   assert(!collector_state()->in_initial_mark_gc() ||
2909           collector_state()->in_young_only_phase(), "sanity");
2910 
2911   // We also do not allow mixed GCs during marking.
2912   assert(!collector_state()->mark_or_rebuild_in_progress() || collector_state()->in_young_only_phase(), "sanity");
2913 
2914   // Record whether this pause is an initial mark. When the current
2915   // thread has completed its logging output and it's safe to signal
2916   // the CM thread, the flag's value in the policy has been reset.
2917   bool should_start_conc_mark = collector_state()->in_initial_mark_gc();
2918 
2919   // Inner scope for scope based logging, timers, and stats collection
2920   {
2921     EvacuationInfo evacuation_info;
2922 
2923     if (collector_state()->in_initial_mark_gc()) {
2924       // We are about to start a marking cycle, so we increment the
2925       // full collection counter.
2926       increment_old_marking_cycles_started();
2927       _cm->gc_tracer_cm()->set_gc_cause(gc_cause());
2928     }
2929 
2930     _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
2931 
2932     GCTraceCPUTime tcpu;
2933 
2934     G1HeapVerifier::G1VerifyType verify_type;
2935     FormatBuffer<> gc_string("Pause Young ");
2936     if (collector_state()->in_initial_mark_gc()) {
2937       gc_string.append("(Concurrent Start)");
2938       verify_type = G1HeapVerifier::G1VerifyConcurrentStart;
2939     } else if (collector_state()->in_young_only_phase()) {
2940       if (collector_state()->in_young_gc_before_mixed()) {
2941         gc_string.append("(Prepare Mixed)");


3924       break;
3925     }
3926 
3927     optional_cset.prepare_evacuation(time_left_ms * _g1_policy->optional_evacuation_fraction());
3928     if (optional_cset.prepare_failed()) {
3929       log_trace(gc, ergo, cset)("Skipping %u optional regions, no regions can be evacuated in %.3fms", optional_cset.size(), time_left_ms);
3930       break;
3931     }
3932 
3933     evacuate_optional_regions(per_thread_states, &optional_cset);
3934 
3935     optional_cset.complete_evacuation();
3936     if (optional_cset.evacuation_failed()) {
3937       break;
3938     }
3939   } while (!optional_cset.is_empty());
3940 
3941   phase_times->record_optional_evacuation((os::elapsedTime() - start_time_sec) * 1000.0);
3942 }
3943 
3944 void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
3945   // Also cleans the card table from temporary duplicate detection information used
3946   // during UpdateRS/ScanRS.
3947   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
3948 
3949   // Process any discovered reference objects - we have
3950   // to do this _before_ we retire the GC alloc regions
3951   // as we may have to copy some 'reachable' referent
3952   // objects (and their reachable sub-graphs) that were
3953   // not copied during the pause.
3954   process_discovered_references(per_thread_states);
3955 
3956   G1STWIsAliveClosure is_alive(this);
3957   G1KeepAliveClosure keep_alive(this);
3958 
3959   WeakProcessor::weak_oops_do(workers(), &is_alive, &keep_alive,
3960                               g1_policy()->phase_times()->weak_phase_times());
3961 
3962   {
3963     double cleaning_start = os::elapsedTime();
3964 


4056 
4057 void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
4058   assert(list != NULL, "list can't be null");
4059   if (!list->is_empty()) {
4060     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
4061     _hrm->insert_list_into_free_list(list);
4062   }
4063 }
4064 
// Reduce the heap's summary of used bytes by the given amount; simply
// delegates to decrease_used().
void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {
  decrease_used(bytes);
}
4068 
4069 class G1FreeCollectionSetTask : public AbstractGangTask {
4070 private:
4071 
4072   // Closure applied to all regions in the collection set to do work that needs to
4073   // be done serially in a single thread.
4074   class G1SerialFreeCollectionSetClosure : public HeapRegionClosure {
4075   private:
4076     EvacuationInfo* _evacuation_info;
4077     const size_t* _surviving_young_words;
4078 
4079     // Bytes used in successfully evacuated regions before the evacuation.
4080     size_t _before_used_bytes;
4081     // Bytes used in unsucessfully evacuated regions before the evacuation
4082     size_t _after_used_bytes;
4083 
4084     size_t _bytes_allocated_in_old_since_last_gc;
4085 
4086     size_t _failure_used_words;
4087     size_t _failure_waste_words;
4088 
4089     FreeRegionList _local_free_list;
4090   public:
    // All byte/word statistics start at zero; they are accumulated as the
    // closure visits each collection set region.
    G1SerialFreeCollectionSetClosure(EvacuationInfo* evacuation_info, const size_t* surviving_young_words) :
      HeapRegionClosure(),
      _evacuation_info(evacuation_info),
      _surviving_young_words(surviving_young_words),
      _before_used_bytes(0),
      _after_used_bytes(0),
      _bytes_allocated_in_old_since_last_gc(0),
      _failure_used_words(0),
      _failure_waste_words(0),
      _local_free_list("Local Region List for CSet Freeing") {
    }
4102 
4103     virtual bool do_heap_region(HeapRegion* r) {
4104       G1CollectedHeap* g1h = G1CollectedHeap::heap();
4105 
4106       assert(r->in_collection_set(), "Region %u should be in collection set.", r->hrm_index());
4107       g1h->clear_in_cset(r);
4108 
4109       if (r->is_young()) {
4110         assert(r->young_index_in_cset() != -1 && (uint)r->young_index_in_cset() < g1h->collection_set()->young_region_length(),
4111                "Young index %d is wrong for region %u of type %s with %u young regions",


4225 
4226     virtual bool do_heap_region(HeapRegion* r) {
4227       _work_items[_cur_idx++] = WorkItem(r);
4228       return false;
4229     }
4230   };
4231 
4232   void prepare_work() {
4233     G1PrepareFreeCollectionSetClosure cl(_work_items);
4234     _collection_set->iterate(&cl);
4235   }
4236 
  void complete_work() {
    // Let the serial closure finish its own bookkeeping first.
    _cl.complete_work();

    // Then report the gathered remembered set lengths and the freed
    // collection set regions to the policy.
    G1Policy* policy = G1CollectedHeap::heap()->g1_policy();
    policy->record_max_rs_lengths(_rs_lengths);
    policy->cset_regions_freed();
  }
4244 public:
  // Sets up one work item per region in the collection set; the regions are
  // snapshotted eagerly in prepare_work() before any worker starts.
  G1FreeCollectionSetTask(G1CollectionSet* collection_set, EvacuationInfo* evacuation_info, const size_t* surviving_young_words) :
    AbstractGangTask("G1 Free Collection Set"),
    _collection_set(collection_set),
    _cl(evacuation_info, surviving_young_words),
    _surviving_young_words(surviving_young_words),
    _rs_lengths(0),
    _serial_work_claim(0),
    _parallel_work_claim(0),
    _num_work_items(collection_set->region_length()),
    _work_items(NEW_C_HEAP_ARRAY(WorkItem, _num_work_items, mtGC)) {
    prepare_work();
  }
4257 
  ~G1FreeCollectionSetTask() {
    // Flush accumulated statistics to the policy (complete_work()) when the
    // task is torn down, then release the C-heap-allocated work item array.
    complete_work();
    FREE_C_HEAP_ARRAY(WorkItem, _work_items);
  }
4262 
  // Chunk size for work distribution. The chosen value has been determined experimentally
  // to be a good tradeoff between overhead and achievable parallelism.
  // Also used by free_collection_set() to size the number of workers.
  static uint chunk_size() { return 32; }


4308           has_young_time = true;
4309           event.commit(GCId::current(), worker_id, G1GCPhaseTimes::phase_name(G1GCPhaseTimes::YoungFreeCSet));
4310         } else {
4311           non_young_time += time_taken;
4312           has_non_young_time = true;
4313           event.commit(GCId::current(), worker_id, G1GCPhaseTimes::phase_name(G1GCPhaseTimes::NonYoungFreeCSet));
4314         }
4315         start_time = end_time;
4316       }
4317     }
4318 
4319     if (has_young_time) {
4320       timer->record_time_secs(G1GCPhaseTimes::YoungFreeCSet, worker_id, young_time);
4321     }
4322     if (has_non_young_time) {
4323       timer->record_time_secs(G1GCPhaseTimes::NonYoungFreeCSet, worker_id, non_young_time);
4324     }
4325   }
4326 };
4327 
4328 void G1CollectedHeap::free_collection_set(G1CollectionSet* collection_set, EvacuationInfo& evacuation_info, const size_t* surviving_young_words) {
4329   _eden.clear();
4330 
4331   double free_cset_start_time = os::elapsedTime();
4332 
4333   {
4334     uint const num_chunks = MAX2(_collection_set.region_length() / G1FreeCollectionSetTask::chunk_size(), 1U);
4335     uint const num_workers = MIN2(workers()->active_workers(), num_chunks);
4336 
4337     G1FreeCollectionSetTask cl(collection_set, &evacuation_info, surviving_young_words);
4338 
4339     log_debug(gc, ergo)("Running %s using %u workers for collection set length %u",
4340                         cl.name(),
4341                         num_workers,
4342                         _collection_set.region_length());
4343     workers()->run_task(&cl, num_workers);
4344   }
4345   g1_policy()->phase_times()->record_total_free_cset_time_ms((os::elapsedTime() - free_cset_start_time) * 1000.0);
4346 
4347   collection_set->clear();
4348 }




2901     // This call will decide whether this pause is an initial-mark
2902     // pause. If it is, in_initial_mark_gc() will return true
2903     // for the duration of this pause.
2904     g1_policy()->decide_on_conc_mark_initiation();
2905   }
2906 
2907   // We do not allow initial-mark to be piggy-backed on a mixed GC.
2908   assert(!collector_state()->in_initial_mark_gc() ||
2909           collector_state()->in_young_only_phase(), "sanity");
2910 
2911   // We also do not allow mixed GCs during marking.
2912   assert(!collector_state()->mark_or_rebuild_in_progress() || collector_state()->in_young_only_phase(), "sanity");
2913 
2914   // Record whether this pause is an initial mark. When the current
2915   // thread has completed its logging output and it's safe to signal
2916   // the CM thread, the flag's value in the policy has been reset.
2917   bool should_start_conc_mark = collector_state()->in_initial_mark_gc();
2918 
2919   // Inner scope for scope based logging, timers, and stats collection
2920   {
2921     G1EvacuationInfo evacuation_info;
2922 
2923     if (collector_state()->in_initial_mark_gc()) {
2924       // We are about to start a marking cycle, so we increment the
2925       // full collection counter.
2926       increment_old_marking_cycles_started();
2927       _cm->gc_tracer_cm()->set_gc_cause(gc_cause());
2928     }
2929 
2930     _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
2931 
2932     GCTraceCPUTime tcpu;
2933 
2934     G1HeapVerifier::G1VerifyType verify_type;
2935     FormatBuffer<> gc_string("Pause Young ");
2936     if (collector_state()->in_initial_mark_gc()) {
2937       gc_string.append("(Concurrent Start)");
2938       verify_type = G1HeapVerifier::G1VerifyConcurrentStart;
2939     } else if (collector_state()->in_young_only_phase()) {
2940       if (collector_state()->in_young_gc_before_mixed()) {
2941         gc_string.append("(Prepare Mixed)");


3924       break;
3925     }
3926 
3927     optional_cset.prepare_evacuation(time_left_ms * _g1_policy->optional_evacuation_fraction());
3928     if (optional_cset.prepare_failed()) {
3929       log_trace(gc, ergo, cset)("Skipping %u optional regions, no regions can be evacuated in %.3fms", optional_cset.size(), time_left_ms);
3930       break;
3931     }
3932 
3933     evacuate_optional_regions(per_thread_states, &optional_cset);
3934 
3935     optional_cset.complete_evacuation();
3936     if (optional_cset.evacuation_failed()) {
3937       break;
3938     }
3939   } while (!optional_cset.is_empty());
3940 
3941   phase_times->record_optional_evacuation((os::elapsedTime() - start_time_sec) * 1000.0);
3942 }
3943 
3944 void G1CollectedHeap::post_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
3945   // Also cleans the card table from temporary duplicate detection information used
3946   // during UpdateRS/ScanRS.
3947   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
3948 
3949   // Process any discovered reference objects - we have
3950   // to do this _before_ we retire the GC alloc regions
3951   // as we may have to copy some 'reachable' referent
3952   // objects (and their reachable sub-graphs) that were
3953   // not copied during the pause.
3954   process_discovered_references(per_thread_states);
3955 
3956   G1STWIsAliveClosure is_alive(this);
3957   G1KeepAliveClosure keep_alive(this);
3958 
3959   WeakProcessor::weak_oops_do(workers(), &is_alive, &keep_alive,
3960                               g1_policy()->phase_times()->weak_phase_times());
3961 
3962   {
3963     double cleaning_start = os::elapsedTime();
3964 


4056 
4057 void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
4058   assert(list != NULL, "list can't be null");
4059   if (!list->is_empty()) {
4060     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
4061     _hrm->insert_list_into_free_list(list);
4062   }
4063 }
4064 
// Reduce the heap's summary of used bytes by the given amount; simply
// delegates to decrease_used().
void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {
  decrease_used(bytes);
}
4068 
4069 class G1FreeCollectionSetTask : public AbstractGangTask {
4070 private:
4071 
4072   // Closure applied to all regions in the collection set to do work that needs to
4073   // be done serially in a single thread.
4074   class G1SerialFreeCollectionSetClosure : public HeapRegionClosure {
4075   private:
4076     G1EvacuationInfo* _evacuation_info;
4077     const size_t* _surviving_young_words;
4078 
4079     // Bytes used in successfully evacuated regions before the evacuation.
4080     size_t _before_used_bytes;
4081     // Bytes used in unsucessfully evacuated regions before the evacuation
4082     size_t _after_used_bytes;
4083 
4084     size_t _bytes_allocated_in_old_since_last_gc;
4085 
4086     size_t _failure_used_words;
4087     size_t _failure_waste_words;
4088 
4089     FreeRegionList _local_free_list;
4090   public:
    // All byte/word statistics start at zero; they are accumulated as the
    // closure visits each collection set region.
    G1SerialFreeCollectionSetClosure(G1EvacuationInfo* evacuation_info, const size_t* surviving_young_words) :
      HeapRegionClosure(),
      _evacuation_info(evacuation_info),
      _surviving_young_words(surviving_young_words),
      _before_used_bytes(0),
      _after_used_bytes(0),
      _bytes_allocated_in_old_since_last_gc(0),
      _failure_used_words(0),
      _failure_waste_words(0),
      _local_free_list("Local Region List for CSet Freeing") {
    }
4102 
4103     virtual bool do_heap_region(HeapRegion* r) {
4104       G1CollectedHeap* g1h = G1CollectedHeap::heap();
4105 
4106       assert(r->in_collection_set(), "Region %u should be in collection set.", r->hrm_index());
4107       g1h->clear_in_cset(r);
4108 
4109       if (r->is_young()) {
4110         assert(r->young_index_in_cset() != -1 && (uint)r->young_index_in_cset() < g1h->collection_set()->young_region_length(),
4111                "Young index %d is wrong for region %u of type %s with %u young regions",


4225 
4226     virtual bool do_heap_region(HeapRegion* r) {
4227       _work_items[_cur_idx++] = WorkItem(r);
4228       return false;
4229     }
4230   };
4231 
4232   void prepare_work() {
4233     G1PrepareFreeCollectionSetClosure cl(_work_items);
4234     _collection_set->iterate(&cl);
4235   }
4236 
  void complete_work() {
    // Let the serial closure finish its own bookkeeping first.
    _cl.complete_work();

    // Then report the gathered remembered set lengths and the freed
    // collection set regions to the policy.
    G1Policy* policy = G1CollectedHeap::heap()->g1_policy();
    policy->record_max_rs_lengths(_rs_lengths);
    policy->cset_regions_freed();
  }
4244 public:
  // Sets up one work item per region in the collection set; the regions are
  // snapshotted eagerly in prepare_work() before any worker starts.
  G1FreeCollectionSetTask(G1CollectionSet* collection_set, G1EvacuationInfo* evacuation_info, const size_t* surviving_young_words) :
    AbstractGangTask("G1 Free Collection Set"),
    _collection_set(collection_set),
    _cl(evacuation_info, surviving_young_words),
    _surviving_young_words(surviving_young_words),
    _rs_lengths(0),
    _serial_work_claim(0),
    _parallel_work_claim(0),
    _num_work_items(collection_set->region_length()),
    _work_items(NEW_C_HEAP_ARRAY(WorkItem, _num_work_items, mtGC)) {
    prepare_work();
  }
4257 
  ~G1FreeCollectionSetTask() {
    // Flush accumulated statistics to the policy (complete_work()) when the
    // task is torn down, then release the C-heap-allocated work item array.
    complete_work();
    FREE_C_HEAP_ARRAY(WorkItem, _work_items);
  }
4262 
  // Chunk size for work distribution. The chosen value has been determined experimentally
  // to be a good tradeoff between overhead and achievable parallelism.
  // Also used by free_collection_set() to size the number of workers.
  static uint chunk_size() { return 32; }


4308           has_young_time = true;
4309           event.commit(GCId::current(), worker_id, G1GCPhaseTimes::phase_name(G1GCPhaseTimes::YoungFreeCSet));
4310         } else {
4311           non_young_time += time_taken;
4312           has_non_young_time = true;
4313           event.commit(GCId::current(), worker_id, G1GCPhaseTimes::phase_name(G1GCPhaseTimes::NonYoungFreeCSet));
4314         }
4315         start_time = end_time;
4316       }
4317     }
4318 
4319     if (has_young_time) {
4320       timer->record_time_secs(G1GCPhaseTimes::YoungFreeCSet, worker_id, young_time);
4321     }
4322     if (has_non_young_time) {
4323       timer->record_time_secs(G1GCPhaseTimes::NonYoungFreeCSet, worker_id, non_young_time);
4324     }
4325   }
4326 };
4327 
4328 void G1CollectedHeap::free_collection_set(G1CollectionSet* collection_set, G1EvacuationInfo& evacuation_info, const size_t* surviving_young_words) {
4329   _eden.clear();
4330 
4331   double free_cset_start_time = os::elapsedTime();
4332 
4333   {
4334     uint const num_chunks = MAX2(_collection_set.region_length() / G1FreeCollectionSetTask::chunk_size(), 1U);
4335     uint const num_workers = MIN2(workers()->active_workers(), num_chunks);
4336 
4337     G1FreeCollectionSetTask cl(collection_set, &evacuation_info, surviving_young_words);
4338 
4339     log_debug(gc, ergo)("Running %s using %u workers for collection set length %u",
4340                         cl.name(),
4341                         num_workers,
4342                         _collection_set.region_length());
4343     workers()->run_task(&cl, num_workers);
4344   }
4345   g1_policy()->phase_times()->record_total_free_cset_time_ms((os::elapsedTime() - free_cset_start_time) * 1000.0);
4346 
4347   collection_set->clear();
4348 }


< prev index next >