};

void G1CollectedHeap::string_dedup_cleaning(BoolObjectClosure* is_alive,
                                            OopClosure* keep_alive,
                                            G1GCPhaseTimes* phase_times) {
  G1StringDedupCleaningTask cl(is_alive, keep_alive, phase_times);
  workers()->run_task(&cl);
}

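// Task that applies RedirtyLoggedCardTableEntryClosure to all completed buffers
// of the given G1RedirtyCardsQueueSet, distributing the buffers across workers.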
class G1RedirtyLoggedCardsTask : public AbstractGangTask {
private:
  G1RedirtyCardsQueueSet* _qset;
  G1CollectedHeap* _g1h;
  BufferNode* volatile _nodes;

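  // Pop completed buffers off the shared list one at a time using CAS so that
  // each buffer is processed by exactly one worker thread.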
  void par_apply(RedirtyLoggedCardTableEntryClosure* cl, uint worker_id) {
    size_t buffer_size = _qset->buffer_size();
    BufferNode* next = Atomic::load(&_nodes);
    while (next != NULL) {
      BufferNode* node = next;
      next = Atomic::cmpxchg(node->next(), &_nodes, node);
      if (next == node) {
        cl->apply_to_buffer(node, buffer_size, worker_id);
        next = node->next();
      }
    }
  }

public:
  G1RedirtyLoggedCardsTask(G1RedirtyCardsQueueSet* qset, G1CollectedHeap* g1h) :
    AbstractGangTask("Redirty Cards"),
    _qset(qset), _g1h(g1h), _nodes(qset->all_completed_buffers()) { }

  virtual void work(uint worker_id) {
    G1GCPhaseTimes* p = _g1h->phase_times();
    G1GCParPhaseTimesTracker x(p, G1GCPhaseTimes::RedirtyCards, worker_id);

    RedirtyLoggedCardTableEntryClosure cl(_g1h);
    par_apply(&cl, worker_id);

    p->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_dirtied());

// ... lines 3401-4208 omitted ...

      evacuation_failed = r->evacuation_failed();
    }
  };

  volatile size_t _parallel_work_claim;
  size_t _num_work_items;
  WorkItem* _work_items;

  void do_serial_work() {
    // Need to grab the lock to be allowed to modify the old region list.
    MutexLocker x(OldSets_lock, Mutex::_no_safepoint_check_flag);
    _collection_set->iterate(&_cl);
  }

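  // Per-region cleanup done in parallel: accumulate the remembered set size,
  // reset hot card cache counts for old regions, and clear the remembered set
  // unless evacuation of the region failed.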
  void do_parallel_work_for_region(uint region_idx, bool is_young, bool evacuation_failed) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    HeapRegion* r = g1h->region_at(region_idx);
    assert(!g1h->is_on_master_free_list(r), "sanity");

    Atomic::add(r->rem_set()->occupied_locked(), &_rs_length);

    if (!is_young) {
      g1h->hot_card_cache()->reset_card_counts(r);
    }

    if (!evacuation_failed) {
      r->rem_set()->clear_locked();
    }
  }

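  // Builds the WorkItem array by recording one entry per collection set region,
  // so that the regions can later be processed in parallel.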
  class G1PrepareFreeCollectionSetClosure : public HeapRegionClosure {
  private:
    size_t _cur_idx;
    WorkItem* _work_items;
  public:
    G1PrepareFreeCollectionSetClosure(WorkItem* work_items) : HeapRegionClosure(), _cur_idx(0), _work_items(work_items) { }

    virtual bool do_heap_region(HeapRegion* r) {
      _work_items[_cur_idx++] = WorkItem(r);
      return false;

// ... lines 4250-4272 omitted ...

    _parallel_work_claim(0),
    _num_work_items(collection_set->region_length()),
    _work_items(NEW_C_HEAP_ARRAY(WorkItem, _num_work_items, mtGC)) {
    prepare_work();
  }

  ~G1FreeCollectionSetTask() {
    complete_work();
    FREE_C_HEAP_ARRAY(WorkItem, _work_items);
  }

  // Chunk size for work distribution. The chosen value has been determined experimentally
  // to be a good tradeoff between overhead and achievable parallelism.
  static uint chunk_size() { return 32; }

  virtual void work(uint worker_id) {
    G1GCPhaseTimes* timer = G1CollectedHeap::heap()->phase_times();

    // Claim serial work.
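    // Only the first worker to bump _serial_work_claim from zero runs the
    // serial part; all other workers go straight to the parallel phase.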
    if (_serial_work_claim == 0) {
      jint value = Atomic::add(1, &_serial_work_claim) - 1;
      if (value == 0) {
        double serial_time = os::elapsedTime();
        do_serial_work();
        timer->record_serial_free_cset_time_ms((os::elapsedTime() - serial_time) * 1000.0);
      }
    }

    // Start parallel work.
    double young_time = 0.0;
    bool has_young_time = false;
    double non_young_time = 0.0;
    bool has_non_young_time = false;

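    // Workers repeatedly claim chunks of chunk_size() work items by atomically
    // advancing _parallel_work_claim until all items have been handed out.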
    while (true) {
      size_t end = Atomic::add(chunk_size(), &_parallel_work_claim);
      size_t cur = end - chunk_size();

      if (cur >= _num_work_items) {
        break;
      }

      EventGCPhaseParallel event;
      double start_time = os::elapsedTime();

      end = MIN2(end, _num_work_items);

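      // Process the claimed range, attributing the elapsed time to the young or
      // non-young bucket depending on the type of the region being freed.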
      for (; cur < end; cur++) {
        bool is_young = _work_items[cur].is_young;

        do_parallel_work_for_region(_work_items[cur].region_idx, is_young, _work_items[cur].evacuation_failed);

        double end_time = os::elapsedTime();
        double time_taken = end_time - start_time;
        if (is_young) {
          young_time += time_taken;