src/hotspot/share/gc/g1/g1CollectedHeap.cpp

--- old/src/hotspot/share/gc/g1/g1CollectedHeap.cpp

4209       evacuation_failed = r->evacuation_failed();
4210     }
4211   };
4212 
4213   volatile size_t _parallel_work_claim;
4214   size_t _num_work_items;
4215   WorkItem* _work_items;
4216 
4217   void do_serial_work() {
4218     // Need to grab the lock to be allowed to modify the old region list.
4219     MutexLocker x(OldSets_lock, Mutex::_no_safepoint_check_flag);
4220     _collection_set->iterate(&_cl);
4221   }
4222 
4223   void do_parallel_work_for_region(uint region_idx, bool is_young, bool evacuation_failed) {
4224     G1CollectedHeap* g1h = G1CollectedHeap::heap();
4225 
4226     HeapRegion* r = g1h->region_at(region_idx);
4227     assert(!g1h->is_on_master_free_list(r), "sanity");
4228 
4229     Atomic::add(r->rem_set()->occupied_locked(), &_rs_length);
4230 
4231     if (!is_young) {
4232       g1h->hot_card_cache()->reset_card_counts(r);
4233     }
4234 
4235     if (!evacuation_failed) {
4236       r->rem_set()->clear_locked();
4237     }
4238   }
4239 
4240   class G1PrepareFreeCollectionSetClosure : public HeapRegionClosure {
4241   private:
4242     size_t _cur_idx;
4243     WorkItem* _work_items;
4244   public:
4245     G1PrepareFreeCollectionSetClosure(WorkItem* work_items) : HeapRegionClosure(), _cur_idx(0), _work_items(work_items) { }
4246 
4247     virtual bool do_heap_region(HeapRegion* r) {
4248       _work_items[_cur_idx++] = WorkItem(r);
4249       return false;


4273     _parallel_work_claim(0),
4274     _num_work_items(collection_set->region_length()),
4275     _work_items(NEW_C_HEAP_ARRAY(WorkItem, _num_work_items, mtGC)) {
4276     prepare_work();
4277   }
4278 
4279   ~G1FreeCollectionSetTask() {
4280     complete_work();
4281     FREE_C_HEAP_ARRAY(WorkItem, _work_items);
4282   }
4283 
4284   // Chunk size for work distribution. The chosen value has been determined experimentally
4285   // to be a good tradeoff between overhead and achievable parallelism.
4286   static uint chunk_size() { return 32; }
4287 
4288   virtual void work(uint worker_id) {
4289     G1GCPhaseTimes* timer = G1CollectedHeap::heap()->phase_times();
4290 
4291     // Claim serial work.
4292     if (_serial_work_claim == 0) {
4293       jint value = Atomic::add(1, &_serial_work_claim) - 1;
4294       if (value == 0) {
4295         double serial_time = os::elapsedTime();
4296         do_serial_work();
4297         timer->record_serial_free_cset_time_ms((os::elapsedTime() - serial_time) * 1000.0);
4298       }
4299     }
4300 
4301     // Start parallel work.
4302     double young_time = 0.0;
4303     bool has_young_time = false;
4304     double non_young_time = 0.0;
4305     bool has_non_young_time = false;
4306 
4307     while (true) {
4308       size_t end = Atomic::add(chunk_size(), &_parallel_work_claim);
4309       size_t cur = end - chunk_size();
4310 
4311       if (cur >= _num_work_items) {
4312         break;
4313       }
4314 
4315       EventGCPhaseParallel event;
4316       double start_time = os::elapsedTime();
4317 
4318       end = MIN2(end, _num_work_items);
4319 
4320       for (; cur < end; cur++) {
4321         bool is_young = _work_items[cur].is_young;
4322 
4323         do_parallel_work_for_region(_work_items[cur].region_idx, is_young, _work_items[cur].evacuation_failed);
4324 
4325         double end_time = os::elapsedTime();
4326         double time_taken = end_time - start_time;
4327         if (is_young) {
4328           young_time += time_taken;


+++ new/src/hotspot/share/gc/g1/g1CollectedHeap.cpp

4209       evacuation_failed = r->evacuation_failed();
4210     }
4211   };
4212 
4213   volatile size_t _parallel_work_claim;
4214   size_t _num_work_items;
4215   WorkItem* _work_items;
4216 
4217   void do_serial_work() {
4218     // Need to grab the lock to be allowed to modify the old region list.
4219     MutexLocker x(OldSets_lock, Mutex::_no_safepoint_check_flag);
4220     _collection_set->iterate(&_cl);
4221   }
4222 
4223   void do_parallel_work_for_region(uint region_idx, bool is_young, bool evacuation_failed) {
4224     G1CollectedHeap* g1h = G1CollectedHeap::heap();
4225 
4226     HeapRegion* r = g1h->region_at(region_idx);
4227     assert(!g1h->is_on_master_free_list(r), "sanity");
4228 
4229     Atomic::add(&_rs_length, r->rem_set()->occupied_locked());
4230 
4231     if (!is_young) {
4232       g1h->hot_card_cache()->reset_card_counts(r);
4233     }
4234 
4235     if (!evacuation_failed) {
4236       r->rem_set()->clear_locked();
4237     }
4238   }
4239 
4240   class G1PrepareFreeCollectionSetClosure : public HeapRegionClosure {
4241   private:
4242     size_t _cur_idx;
4243     WorkItem* _work_items;
4244   public:
4245     G1PrepareFreeCollectionSetClosure(WorkItem* work_items) : HeapRegionClosure(), _cur_idx(0), _work_items(work_items) { }
4246 
4247     virtual bool do_heap_region(HeapRegion* r) {
4248       _work_items[_cur_idx++] = WorkItem(r);
4249       return false;
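
The closure above runs once, before any parallel work, and flattens the collection set into the _work_items array (do_heap_region returning false means the iteration continues). Workers can then claim items by plain array index without traversing the collection set concurrently. A stand-alone sketch of that preparation step, with Region and WorkItem as illustrative stand-ins for HotSpot's types:

    #include <vector>

    struct Region   { unsigned idx; bool young; };  // illustrative stand-in
    struct WorkItem {
      unsigned region_idx;
      bool     is_young;
      explicit WorkItem(const Region& r) : region_idx(r.idx), is_young(r.young) { }
    };

    // One serial pass builds an array the parallel workers can index freely.
    std::vector<WorkItem> prepare_work_items(const std::vector<Region>& cset) {
      std::vector<WorkItem> items;
      items.reserve(cset.size());
      for (const Region& r : cset) {
        items.emplace_back(r);  // snapshot per-region facts up front
      }
      return items;
    }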


4273     _parallel_work_claim(0),
4274     _num_work_items(collection_set->region_length()),
4275     _work_items(NEW_C_HEAP_ARRAY(WorkItem, _num_work_items, mtGC)) {
4276     prepare_work();
4277   }
4278 
4279   ~G1FreeCollectionSetTask() {
4280     complete_work();
4281     FREE_C_HEAP_ARRAY(WorkItem, _work_items);
4282   }
4283 
4284   // Chunk size for work distribution. The chosen value has been determined experimentally
4285   // to be a good tradeoff between overhead and achievable parallelism.
4286   static uint chunk_size() { return 32; }
4287 
4288   virtual void work(uint worker_id) {
4289     G1GCPhaseTimes* timer = G1CollectedHeap::heap()->phase_times();
4290 
4291     // Claim serial work.
4292     if (_serial_work_claim == 0) {
4293       jint value = Atomic::add(&_serial_work_claim, 1) - 1;
4294       if (value == 0) {
4295         double serial_time = os::elapsedTime();
4296         do_serial_work();
4297         timer->record_serial_free_cset_time_ms((os::elapsedTime() - serial_time) * 1000.0);
4298       }
4299     }
4300 
4301     // Start parallel work.
4302     double young_time = 0.0;
4303     bool has_young_time = false;
4304     double non_young_time = 0.0;
4305     bool has_non_young_time = false;
4306 
4307     while (true) {
4308       size_t end = Atomic::add(&_parallel_work_claim, chunk_size());
4309       size_t cur = end - chunk_size();
4310 
4311       if (cur >= _num_work_items) {
4312         break;
4313       }
4314 
4315       EventGCPhaseParallel event;
4316       double start_time = os::elapsedTime();
4317 
4318       end = MIN2(end, _num_work_items);
4319 
4320       for (; cur < end; cur++) {
4321         bool is_young = _work_items[cur].is_young;
4322 
4323         do_parallel_work_for_region(_work_items[cur].region_idx, is_young, _work_items[cur].evacuation_failed);
4324 
4325         double end_time = os::elapsedTime();
4326         double time_taken = end_time - start_time;
4327         if (is_young) {
4328           young_time += time_taken;
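
Two claiming idioms drive work() above. The serial one elects exactly one worker: any thread may pass the racy _serial_work_claim == 0 pre-check, but only the thread whose atomic increment observes the previous value 0 runs do_serial_work. A self-contained sketch of the idiom, assuming std::atomic rather than HotSpot's Atomic (fetch_add already returns the old value, so no "- 1" adjustment is needed):

    #include <atomic>

    static std::atomic<int> serial_claim{0};

    void maybe_do_serial_work() {
      // Racy pre-check: skips the atomic RMW once the work has been claimed.
      if (serial_claim.load(std::memory_order_relaxed) == 0) {
        // Exactly one thread sees the previous value 0 and wins the claim.
        if (serial_claim.fetch_add(1) == 0) {
          // ... run the serial phase (the lock-protected iteration above) ...
        }
      }
    }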


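The parallel idiom hands out the precomputed items in chunks of chunk_size() = 32, the value the comment above says was tuned experimentally: smaller chunks balance load better but contend harder on the shared claim counter. Each worker atomically advances the claim and owns the half-open range it receives, clamped to the item count. A stand-alone sketch of the scheme with illustrative names:

    #include <algorithm>
    #include <atomic>
    #include <cstddef>

    static const size_t kChunk = 32;      // analogous to chunk_size()
    static std::atomic<size_t> claim{0};  // analogous to _parallel_work_claim

    void worker_loop(size_t num_items, void (*process_item)(size_t)) {
      while (true) {
        // fetch_add returns the old value, so [cur, cur + kChunk) is ours.
        size_t cur = claim.fetch_add(kChunk);
        if (cur >= num_items) {
          break;                          // every item has been claimed
        }
        size_t end = std::min(cur + kChunk, num_items);
        for (; cur < end; cur++) {
          process_item(cur);              // e.g. one heap region's cleanup
        }
      }
    }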