298 return G1DirtyCardQueueSet::num_par_ids() + G1ConcurrentRefine::max_num_threads() + MAX2(ConcGCThreads, ParallelGCThreads);
299 }
300
// One-time initialization of remembered-set support structures, sized for a
// heap with at most max_regions regions. The from-card cache is additionally
// sized by the maximum number of parallel remembered-set accessors.
// NOTE(review): the 'capacity' parameter is unused in this body -- confirm
// whether callers still need to pass it or it can be retired.
void G1RemSet::initialize(size_t capacity, uint max_regions) {
  G1FromCardCache::initialize(num_par_rem_sets(), max_regions);
  _scan_state->initialize(max_regions);
}
305
// Per-worker closure that drives scanning of remembered sets for regions in
// the collection set. Caches the heap and its card table up front and
// zero-initializes all per-worker card statistics and timing accumulators.
G1ScanRSForRegionClosure::G1ScanRSForRegionClosure(G1RemSetScanState* scan_state,
                                                   G1ScanObjsDuringScanRSClosure* scan_obj_on_card,
                                                   G1ParScanThreadState* pss,
                                                   G1GCPhaseTimes::GCParPhases phase,
                                                   uint worker_i) :
  _g1h(G1CollectedHeap::heap()),
  _ct(_g1h->card_table()),          // card table of the heap cached above
  _pss(pss),
  _scan_objs_on_card_cl(scan_obj_on_card),
  _scan_state(scan_state),
  _phase(phase),
  _worker_i(worker_i),
  // Card accounting, reported to the GC phase times after the scan.
  _cards_scanned(0),
  _cards_claimed(0),
  _cards_skipped(0),
  // Timing accumulators, default-constructed to zero elapsed time.
  _rem_set_root_scan_time(),
  _rem_set_trim_partially_time(),
  _strong_code_root_scan_time(),
  _strong_code_trim_partially_time() {
}
326
// Marks the card at card_index as claimed in the card table and records the
// card's region as dirty in the scan state so its card-table range is
// processed during post-pause cleanup.
void G1ScanRSForRegionClosure::claim_card(size_t card_index, const uint region_idx_for_card){
  _ct->set_card_claimed(card_index);
  _scan_state->add_dirty_region(region_idx_for_card);
}
331
// Scans all objects overlapping the memory region mr (one card's worth of
// heap) within the given region, applying the scan closure to each. Young
// regions are asserted never to reach this path.
void G1ScanRSForRegionClosure::scan_card(MemRegion mr, uint region_idx_for_card) {
  HeapRegion* const card_region = _g1h->region_at(region_idx_for_card);
  assert(!card_region->is_young(), "Should not scan card in young region %u", region_idx_for_card);
  card_region->oops_on_card_seq_iterate_careful<true>(mr, _scan_objs_on_card_cl);
  // Keep the per-thread work queue bounded while card scanning proceeds.
  _scan_objs_on_card_cl->trim_queue_partially();
  _cards_scanned++;
}
339
340 void G1ScanRSForRegionClosure::scan_rem_set_roots(HeapRegion* r) {
341 EventGCPhaseParallel event;
342 uint const region_idx = r->hrm_index();
343
344 if (_scan_state->claim_iter(region_idx)) {
345 // If we ever free the collection set concurrently, we should also
346 // clear the card table concurrently therefore we won't need to
347 // add regions of the collection set to the dirty cards region.
348 _scan_state->add_dirty_region(region_idx);
349 }
350
351 if (r->rem_set()->cardset_is_empty()) {
352 return;
353 }
354
355 // We claim cards in blocks so as to reduce the contention.
356 size_t const block_size = G1RSetScanBlockSize;
357
358 HeapRegionRemSetIterator iter(r->rem_set());
359 size_t card_index;
396 claim_card(card_index, region_idx_for_card);
397
398 MemRegion const mr(card_start, MIN2(card_start + BOTConstants::N_words, top));
399
400 scan_card(mr, region_idx_for_card);
401 }
402 event.commit(GCId::current(), _worker_i, G1GCPhaseTimes::phase_name(_phase));
403 }
404
// Applies the per-thread weak code blobs closure to the strong code roots of
// region r, reporting the work under the CodeRoots phase.
void G1ScanRSForRegionClosure::scan_strong_code_roots(HeapRegion* r) {
  EventGCPhaseParallel event;
  // We pass a weak code blobs closure to the remembered set scanning because we want to avoid
  // treating the nmethods visited to act as roots for concurrent marking.
  // We only want to make sure that the oops in the nmethods are adjusted with regard to the
  // objects copied by the current evacuation.
  r->strong_code_roots_do(_pss->closures()->weak_codeblobs());
  event.commit(GCId::current(), _worker_i, G1GCPhaseTimes::phase_name(G1GCPhaseTimes::CodeRoots));
}
414
// Processes one collection set region: scans its remembered set roots and,
// for the worker whose set_iter_complete() call succeeds, also scans the
// strong code roots attached to the region. Always returns false so the
// iteration over the collection set continues.
bool G1ScanRSForRegionClosure::do_heap_region(HeapRegion* r) {
  assert(r->in_collection_set(),
         "Should only be called on elements of the collection set but region %u is not.",
         r->hrm_index());
  uint const region_idx = r->hrm_index();

  // Do an early out if we know we are complete.
  if (_scan_state->iter_is_complete(region_idx)) {
    return false;
  }

  {
    // Time the remembered-set root scan separately from the object-copy
    // (queue trimming) work it interleaves with.
    G1EvacPhaseWithTrimTimeTracker timer(_pss, _rem_set_root_scan_time, _rem_set_trim_partially_time);
    scan_rem_set_roots(r);
  }

  // Only the worker for which set_iter_complete() returns true proceeds,
  // so the code roots of this region are not scanned by multiple workers.
  if (_scan_state->set_iter_complete(region_idx)) {
    G1EvacPhaseWithTrimTimeTracker timer(_pss, _strong_code_root_scan_time, _strong_code_trim_partially_time);
    // Scan the strong code root list attached to the current region
    scan_strong_code_roots(r);
  }
  return false;
}
438
// Scans the remembered sets of the collection set regions for one worker and
// records the resulting timings and per-card work items into the phase times.
void G1RemSet::scan_rem_set(G1ParScanThreadState* pss, uint worker_i) {
  G1ScanObjsDuringScanRSClosure scan_cl(_g1h, pss);
  G1ScanRSForRegionClosure cl(_scan_state, &scan_cl, pss, G1GCPhaseTimes::ScanRS, worker_i);
  _g1h->collection_set_iterate_from(&cl, worker_i);

  G1GCPhaseTimes* p = _g1p->phase_times();

  // Root scanning is attributed to ScanRS; the interleaved queue trimming is
  // really object copying, so it is added onto ObjCopy instead.
  p->record_time_secs(G1GCPhaseTimes::ScanRS, worker_i, cl.rem_set_root_scan_time().seconds());
  p->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_i, cl.rem_set_trim_partially_time().seconds());

  p->record_thread_work_item(G1GCPhaseTimes::ScanRS, worker_i, cl.cards_scanned(), G1GCPhaseTimes::ScanRSScannedCards);
  p->record_thread_work_item(G1GCPhaseTimes::ScanRS, worker_i, cl.cards_claimed(), G1GCPhaseTimes::ScanRSClaimedCards);
  p->record_thread_work_item(G1GCPhaseTimes::ScanRS, worker_i, cl.cards_skipped(), G1GCPhaseTimes::ScanRSSkippedCards);

  // Code root scanning and its trimming portion are split the same way.
  p->record_time_secs(G1GCPhaseTimes::CodeRoots, worker_i, cl.strong_code_root_scan_time().seconds());
  p->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_i, cl.strong_code_root_trim_partially_time().seconds());
}
456
457 // Closure used for updating rem sets. Only called during an evacuation pause.
458 class G1RefineCardClosure: public G1CardTableEntryClosure {
459 G1RemSet* _g1rs;
460 G1ScanObjsDuringUpdateRSClosure* _update_rs_cl;
461
462 size_t _cards_scanned;
463 size_t _cards_skipped;
464 public:
465 G1RefineCardClosure(G1CollectedHeap* g1h, G1ScanObjsDuringUpdateRSClosure* update_rs_cl) :
466 _g1rs(g1h->rem_set()), _update_rs_cl(update_rs_cl), _cards_scanned(0), _cards_skipped(0)
467 {}
468
469 bool do_card_ptr(CardValue* card_ptr, uint worker_i) {
470 // The only time we care about recording cards that
471 // contain references that point into the collection set
472 // is during RSet updating within an evacuation pause.
473 // In this case worker_i should be the id of a GC worker thread.
474 assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
494 // Apply closure to log entries in the HCC.
495 if (G1HotCardCache::default_use_cache()) {
496 G1EvacPhaseTimesTracker x(p, pss, G1GCPhaseTimes::ScanHCC, worker_i);
497
498 G1ScanObjsDuringUpdateRSClosure scan_hcc_cl(_g1h, pss);
499 G1RefineCardClosure refine_card_cl(_g1h, &scan_hcc_cl);
500 _g1h->iterate_hcc_closure(&refine_card_cl, worker_i);
501 }
502
503 // Now apply the closure to all remaining log entries.
504 {
505 G1EvacPhaseTimesTracker x(p, pss, G1GCPhaseTimes::UpdateRS, worker_i);
506
507 G1ScanObjsDuringUpdateRSClosure update_rs_cl(_g1h, pss);
508 G1RefineCardClosure refine_card_cl(_g1h, &update_rs_cl);
509 _g1h->iterate_dirty_card_closure(&refine_card_cl, worker_i);
510
511 p->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, refine_card_cl.cards_scanned(), G1GCPhaseTimes::UpdateRSScannedCards);
512 p->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, refine_card_cl.cards_skipped(), G1GCPhaseTimes::UpdateRSSkippedCards);
513 }
514 }
515
516 void G1RemSet::oops_into_collection_set_do(G1ParScanThreadState* pss, uint worker_i) {
517 update_rem_set(pss, worker_i);
518 scan_rem_set(pss, worker_i);;
519 }
520
521 void G1RemSet::prepare_for_oops_into_collection_set_do() {
522 G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
523 dcqs.concatenate_logs();
524
525 _scan_state->reset();
526 }
527
528 void G1RemSet::cleanup_after_oops_into_collection_set_do() {
529 G1GCPhaseTimes* phase_times = _g1h->phase_times();
530
531 // Set all cards back to clean.
532 double start = os::elapsedTime();
533 _scan_state->clear_card_table(_g1h->workers());
534 phase_times->record_clear_ct_time((os::elapsedTime() - start) * 1000.0);
535 }
536
537 inline void check_card_ptr(CardTable::CardValue* card_ptr, G1CardTable* ct) {
538 #ifdef ASSERT
|
298 return G1DirtyCardQueueSet::num_par_ids() + G1ConcurrentRefine::max_num_threads() + MAX2(ConcGCThreads, ParallelGCThreads);
299 }
300
// One-time initialization of remembered-set support structures, sized for a
// heap with at most max_regions regions. The from-card cache is additionally
// sized by the maximum number of parallel remembered-set accessors.
// NOTE(review): the 'capacity' parameter is unused in this body -- confirm
// whether callers still need to pass it or it can be retired.
void G1RemSet::initialize(size_t capacity, uint max_regions) {
  G1FromCardCache::initialize(num_par_rem_sets(), max_regions);
  _scan_state->initialize(max_regions);
}
305
// Per-worker closure that drives scanning of remembered sets for regions in
// the collection set. Caches the heap and its card table up front and
// zero-initializes all per-worker statistics and timing accumulators,
// including the counters for optional-region reference scanning.
G1ScanRSForRegionClosure::G1ScanRSForRegionClosure(G1RemSetScanState* scan_state,
                                                   G1ScanObjsDuringScanRSClosure* scan_obj_on_card,
                                                   G1ParScanThreadState* pss,
                                                   G1GCPhaseTimes::GCParPhases phase,
                                                   uint worker_i) :
  _g1h(G1CollectedHeap::heap()),
  _ct(_g1h->card_table()),          // card table of the heap cached above
  _pss(pss),
  _scan_objs_on_card_cl(scan_obj_on_card),
  _scan_state(scan_state),
  _phase(phase),
  _worker_i(worker_i),
  // Statistics for references into the optional collection set.
  _opt_refs_scanned(0),
  _opt_refs_memory_used(0),
  // Card accounting, reported to the GC phase times after the scan.
  _cards_scanned(0),
  _cards_claimed(0),
  _cards_skipped(0),
  // Timing accumulators, default-constructed to zero elapsed time.
  _rem_set_root_scan_time(),
  _rem_set_trim_partially_time(),
  _strong_code_root_scan_time(),
  _strong_code_trim_partially_time() {
}
328
// Marks the card at card_index as claimed in the card table and records the
// card's region as dirty in the scan state so its card-table range is
// processed during post-pause cleanup.
void G1ScanRSForRegionClosure::claim_card(size_t card_index, const uint region_idx_for_card){
  _ct->set_card_claimed(card_index);
  _scan_state->add_dirty_region(region_idx_for_card);
}
333
// Scans all objects overlapping the memory region mr (one card's worth of
// heap) within the given region, applying the scan closure to each. Young
// regions are asserted never to reach this path.
void G1ScanRSForRegionClosure::scan_card(MemRegion mr, uint region_idx_for_card) {
  HeapRegion* const card_region = _g1h->region_at(region_idx_for_card);
  assert(!card_region->is_young(), "Should not scan card in young region %u", region_idx_for_card);
  card_region->oops_on_card_seq_iterate_careful<true>(mr, _scan_objs_on_card_cl);
  // Keep the per-thread work queue bounded while card scanning proceeds.
  _scan_objs_on_card_cl->trim_queue_partially();
  _cards_scanned++;
}
341
// Scans the per-worker list of references recorded into the optional
// collection set region r, applying a fresh scan closure through the raw
// strong oop closures of the per-thread state. Accumulates the number of
// references scanned and the memory used by the list for later reporting.
void G1ScanRSForRegionClosure::scan_opt_rem_set_roots(HeapRegion* r){
  EventGCPhaseParallel event;

  G1OopStarChunkedList* opt_rem_set_list = _pss->oops_into_optional_region(r);

  G1ScanObjsDuringScanRSClosure scan_cl(_g1h, _pss);
  G1ScanRSForOptionalClosure cl(&scan_cl);
  _opt_refs_scanned += opt_rem_set_list->oops_do(&cl, _pss->closures()->raw_strong_oops());
  _opt_refs_memory_used += opt_rem_set_list->used_memory();

  event.commit(GCId::current(), _worker_i, G1GCPhaseTimes::phase_name(_phase));
}
354
355 void G1ScanRSForRegionClosure::scan_rem_set_roots(HeapRegion* r) {
356 EventGCPhaseParallel event;
357 uint const region_idx = r->hrm_index();
358
359 if (_scan_state->claim_iter(region_idx)) {
360 // If we ever free the collection set concurrently, we should also
361 // clear the card table concurrently therefore we won't need to
362 // add regions of the collection set to the dirty cards region.
363 _scan_state->add_dirty_region(region_idx);
364 }
365
366 if (r->rem_set()->cardset_is_empty()) {
367 return;
368 }
369
370 // We claim cards in blocks so as to reduce the contention.
371 size_t const block_size = G1RSetScanBlockSize;
372
373 HeapRegionRemSetIterator iter(r->rem_set());
374 size_t card_index;
411 claim_card(card_index, region_idx_for_card);
412
413 MemRegion const mr(card_start, MIN2(card_start + BOTConstants::N_words, top));
414
415 scan_card(mr, region_idx_for_card);
416 }
417 event.commit(GCId::current(), _worker_i, G1GCPhaseTimes::phase_name(_phase));
418 }
419
// Applies the per-thread weak code blobs closure to the strong code roots of
// region r, reporting the work under the CodeRoots phase.
void G1ScanRSForRegionClosure::scan_strong_code_roots(HeapRegion* r) {
  EventGCPhaseParallel event;
  // We pass a weak code blobs closure to the remembered set scanning because we want to avoid
  // treating the nmethods visited to act as roots for concurrent marking.
  // We only want to make sure that the oops in the nmethods are adjusted with regard to the
  // objects copied by the current evacuation.
  r->strong_code_roots_do(_pss->closures()->weak_codeblobs());
  event.commit(GCId::current(), _worker_i, G1GCPhaseTimes::phase_name(G1GCPhaseTimes::CodeRoots));
}
429
// Processes one collection set region: scans any per-worker optional
// remembered set references, then the region's remembered set roots, and,
// for the worker whose set_iter_complete() call succeeds, the strong code
// roots attached to the region. Always returns false so the iteration over
// the collection set continues.
bool G1ScanRSForRegionClosure::do_heap_region(HeapRegion* r) {
  assert(r->in_collection_set(), "Region %u is not in the collection set.", r->hrm_index());
  uint const region_idx = r->hrm_index();

  // The individual references for the optional remembered set are per-worker, so we
  // always need to scan them.
  if (r->has_index_in_opt_cset()) {
    G1EvacPhaseWithTrimTimeTracker timer(_pss, _rem_set_root_scan_time, _rem_set_trim_partially_time);
    scan_opt_rem_set_roots(r);
  }

  // Do an early out if we know we are complete.
  if (_scan_state->iter_is_complete(region_idx)) {
    return false;
  }

  {
    // Time the remembered-set root scan separately from the object-copy
    // (queue trimming) work it interleaves with.
    G1EvacPhaseWithTrimTimeTracker timer(_pss, _rem_set_root_scan_time, _rem_set_trim_partially_time);
    scan_rem_set_roots(r);
  }

  // Only the worker for which set_iter_complete() returns true proceeds,
  // so the code roots of this region are not scanned by multiple workers.
  if (_scan_state->set_iter_complete(region_idx)) {
    G1EvacPhaseWithTrimTimeTracker timer(_pss, _strong_code_root_scan_time, _strong_code_trim_partially_time);
    // Scan the strong code root list attached to the current region
    scan_strong_code_roots(r);
  }
  return false;
}
458
// Scans the remembered sets of the collection set regions for one worker,
// attributing times and work items to the given scan/objcopy/coderoots
// phases. The phase parameters allow this to serve both the initial scan
// (ScanRS) and the optional-region scan (OptScanRS) passes.
void G1RemSet::scan_rem_set(G1ParScanThreadState* pss,
                            uint worker_i,
                            G1GCPhaseTimes::GCParPhases scan_phase,
                            G1GCPhaseTimes::GCParPhases objcopy_phase,
                            G1GCPhaseTimes::GCParPhases coderoots_phase) {
  assert(pss->trim_ticks().value() == 0, "Queues must have been trimmed before entering.");

  G1ScanObjsDuringScanRSClosure scan_cl(_g1h, pss);
  G1ScanRSForRegionClosure cl(_scan_state, &scan_cl, pss, scan_phase, worker_i);
  _g1h->collection_set_iterate_from(&cl, worker_i);

  G1GCPhaseTimes* p = _g1p->phase_times();

  // record_or_add_* is used so a second invocation for the same worker in
  // the same pause accumulates into the already-recorded slot.
  p->record_or_add_time_secs(objcopy_phase, worker_i, cl.rem_set_trim_partially_time().seconds());

  p->record_or_add_time_secs(scan_phase, worker_i, cl.rem_set_root_scan_time().seconds());
  p->record_or_add_thread_work_item(scan_phase, worker_i, cl.cards_scanned(), G1GCPhaseTimes::ScanRSScannedCards);
  p->record_or_add_thread_work_item(scan_phase, worker_i, cl.cards_claimed(), G1GCPhaseTimes::ScanRSClaimedCards);
  p->record_or_add_thread_work_item(scan_phase, worker_i, cl.cards_skipped(), G1GCPhaseTimes::ScanRSSkippedCards);
  // At this time we only record some metrics for the optional remembered set.
  if (scan_phase == G1GCPhaseTimes::OptScanRS) {
    p->record_or_add_thread_work_item(scan_phase, worker_i, cl.opt_refs_scanned(), G1GCPhaseTimes::ScanRSScannedOptRefs);
    p->record_or_add_thread_work_item(scan_phase, worker_i, cl.opt_refs_memory_used(), G1GCPhaseTimes::ScanRSUsedMemory);
  }

  p->record_or_add_time_secs(coderoots_phase, worker_i, cl.strong_code_root_scan_time().seconds());
  // The objcopy slot for this worker was established above, so a plain add
  // is sufficient here.
  p->add_time_secs(objcopy_phase, worker_i, cl.strong_code_root_trim_partially_time().seconds());
}
487
488 // Closure used for updating rem sets. Only called during an evacuation pause.
489 class G1RefineCardClosure: public G1CardTableEntryClosure {
490 G1RemSet* _g1rs;
491 G1ScanObjsDuringUpdateRSClosure* _update_rs_cl;
492
493 size_t _cards_scanned;
494 size_t _cards_skipped;
495 public:
496 G1RefineCardClosure(G1CollectedHeap* g1h, G1ScanObjsDuringUpdateRSClosure* update_rs_cl) :
497 _g1rs(g1h->rem_set()), _update_rs_cl(update_rs_cl), _cards_scanned(0), _cards_skipped(0)
498 {}
499
500 bool do_card_ptr(CardValue* card_ptr, uint worker_i) {
501 // The only time we care about recording cards that
502 // contain references that point into the collection set
503 // is during RSet updating within an evacuation pause.
504 // In this case worker_i should be the id of a GC worker thread.
505 assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
525 // Apply closure to log entries in the HCC.
526 if (G1HotCardCache::default_use_cache()) {
527 G1EvacPhaseTimesTracker x(p, pss, G1GCPhaseTimes::ScanHCC, worker_i);
528
529 G1ScanObjsDuringUpdateRSClosure scan_hcc_cl(_g1h, pss);
530 G1RefineCardClosure refine_card_cl(_g1h, &scan_hcc_cl);
531 _g1h->iterate_hcc_closure(&refine_card_cl, worker_i);
532 }
533
534 // Now apply the closure to all remaining log entries.
535 {
536 G1EvacPhaseTimesTracker x(p, pss, G1GCPhaseTimes::UpdateRS, worker_i);
537
538 G1ScanObjsDuringUpdateRSClosure update_rs_cl(_g1h, pss);
539 G1RefineCardClosure refine_card_cl(_g1h, &update_rs_cl);
540 _g1h->iterate_dirty_card_closure(&refine_card_cl, worker_i);
541
542 p->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, refine_card_cl.cards_scanned(), G1GCPhaseTimes::UpdateRSScannedCards);
543 p->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, refine_card_cl.cards_skipped(), G1GCPhaseTimes::UpdateRSSkippedCards);
544 }
545 }
546
547 void G1RemSet::prepare_for_oops_into_collection_set_do() {
548 G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
549 dcqs.concatenate_logs();
550
551 _scan_state->reset();
552 }
553
554 void G1RemSet::cleanup_after_oops_into_collection_set_do() {
555 G1GCPhaseTimes* phase_times = _g1h->phase_times();
556
557 // Set all cards back to clean.
558 double start = os::elapsedTime();
559 _scan_state->clear_card_table(_g1h->workers());
560 phase_times->record_clear_ct_time((os::elapsedTime() - start) * 1000.0);
561 }
562
563 inline void check_card_ptr(CardTable::CardValue* card_ptr, G1CardTable* ct) {
564 #ifdef ASSERT
|