#endif // ASSERT

        // Initialize the GC alloc regions.
        _allocator->init_gc_alloc_regions(evacuation_info);

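        // One G1ParScanThreadState per worker thread: each holds the worker's
        // PLABs, task queue, and per-region surviving-young-words counts.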
        G1ParScanThreadStateSet per_thread_states(this, workers()->active_workers(), g1_policy()->young_cset_region_length());
        pre_evacuate_collection_set();

        // Actually do the work...
        evacuate_collection_set(evacuation_info, &per_thread_states);

        post_evacuate_collection_set(evacuation_info, &per_thread_states);

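        // Per-region counts of words that survived evacuation, summed over all
        // workers; free_collection_set() uses them to record survival rates for
        // the young regions being freed.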
        const size_t* surviving_young_words = per_thread_states.surviving_young_words();
        free_collection_set(g1_policy()->collection_set(), evacuation_info, surviving_young_words);

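        // Humongous regions found to be unreferenced during the pause can be
        // reclaimed immediately, without waiting for a concurrent marking cycle.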
        eagerly_reclaim_humongous_regions();

        g1_policy()->clear_collection_set();

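        // Copying is complete: publish the allocation statistics and use them
        // to re-size the survivor and old PLABs for the next pause.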
        record_obj_copy_mem_stats();
        _survivor_evac_stats.adjust_desired_plab_sz();
        _old_evac_stats.adjust_desired_plab_sz();

        // Start a new incremental collection set for the next pause.
        g1_policy()->start_incremental_cset_building();

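        // Reset the table backing the fast in-collection-set test; regions are
        // re-registered as they are added to the new incremental collection set.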
        clear_cset_fast_test();

        // Don't check the whole heap at this point as the
        // GC alloc regions from this pause have been tagged
        // as survivors and moved on to the survivor list.
        // Survivor regions will fail the !is_young() check.
        assert(check_young_list_empty(false /* check_heap */),
               "young list should be empty");

        g1_policy()->record_survivor_regions(_young_list->survivor_length(),
                                             _young_list->first_survivor_region(),
                                             _young_list->last_survivor_region());

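        // Fold the survivor regions back into the young list; they become the
        // initial young generation for the next pause.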
        _young_list->reset_auxilary_lists();

        if (evacuation_failed()) {
          set_used(recalculate_used());

// ... (intervening code elided; the excerpt resumes mid-way through a later function) ...

    // evacuation failure occurs.
    NOT_PRODUCT(reset_evacuation_should_fail();)
  }

  // Enqueue any references remaining on the STW reference processor's
  // discovered lists. We need to do this after the card table is cleaned
  // (and verified) as the act of enqueueing entries onto the pending list
  // will log these updates (and dirty their associated cards). We need
  // these updates logged to update any RSets.
  if (g1_policy()->should_process_references()) {
    enqueue_discovered_references(per_thread_states);
  } else {
    g1_policy()->phase_times()->record_ref_enq_time(0);
  }

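  // Retire the survivor and old GC alloc regions used during this pause;
  // a retained old region may be kept for reuse by the next evacuation.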
  _allocator->release_gc_alloc_regions(evacuation_info);

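  // Flush each worker's G1ParScanThreadState (retiring any remaining PLABs)
  // and fold its statistics into the global totals.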
  merge_per_thread_state_info(per_thread_states);

  // Reset and re-enable the hot card cache.
  // Note that the counts for the cards in the regions in the
  // collection set are reset when the collection set is freed.
  G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
  hot_card_cache->reset_hot_cache();
  hot_card_cache->set_use_cache(true);

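  // Free memory retained by code root sets; the purge time is recorded in
  // the phase times.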
  purge_code_root_memory();

  redirty_logged_cards();
#if defined(COMPILER2) || INCLUDE_JVMCI
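  // Compiled frames may hold derived pointers (interior pointers computed
  // from a base oop). Objects may have moved, so recompute each derived
  // pointer from its relocated base.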
  DerivedPointerTable::update_pointers();
#endif
}

void G1CollectedHeap::record_obj_copy_mem_stats() {
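  // Evacuation statistics are kept in HeapWords; convert to bytes before
  // reporting to the policy.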
  g1_policy()->add_bytes_allocated_in_old_since_last_gc(_old_evac_stats.allocated() * HeapWordSize);

  _gc_tracer_stw->report_evacuation_statistics(create_g1_evac_summary(&_survivor_evac_stats),
                                               create_g1_evac_summary(&_old_evac_stats));
}