3487
3488 void G1CollectedHeap::print_all_rsets() {
3489 PrintRSetsClosure cl("Printing All RSets");;
3490 heap_region_iterate(&cl);
3491 }
3492 #endif // PRODUCT
3493
3494 G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
3495 YoungList* young_list = heap()->young_list();
3496
3497 size_t eden_used_bytes = young_list->eden_used_bytes();
3498 size_t survivor_used_bytes = young_list->survivor_used_bytes();
3499
3500 size_t eden_capacity_bytes =
3501 (g1_policy()->young_list_target_length() * HeapRegion::GrainBytes) - survivor_used_bytes;
3502
3503 VirtualSpaceSummary heap_summary = create_heap_space_summary();
3504 return G1HeapSummary(heap_summary, used(), eden_used_bytes, eden_capacity_bytes, survivor_used_bytes);
3505 }
3506
3507 void G1CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
3508 const G1HeapSummary& heap_summary = create_g1_heap_summary();
3509 gc_tracer->report_gc_heap_summary(when, heap_summary);
3510
3511 const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
3512 gc_tracer->report_metaspace_summary(when, metaspace_summary);
3513 }
3514
3515
3516 G1CollectedHeap* G1CollectedHeap::heap() {
3517 CollectedHeap* heap = Universe::heap();
3518 assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
3519 assert(heap->kind() == CollectedHeap::G1CollectedHeap, "Not a G1CollectedHeap");
3520 return (G1CollectedHeap*)heap;
3521 }
3522
// NOTE(review): this block appears to be a corrupted splice -- the embedded
// listing numbers jump from 3526 straight to 5527, and everything after the
// "Fill TLAB's" comment matches a post-evacuation cleanup routine rather than
// a GC prologue. It also uses names (phase_times, evacuation_info,
// hot_card_cache) that no declaration in this scope provides. Reconstruct the
// real gc_prologue body before making functional changes here.
3523 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
3524 // always_do_update_barrier = false;
3525 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
3526 // Fill TLAB's and such
// --- spliced content begins here (listing numbers jump to 55xx) ---
5527 // to do this _before_ we retire the GC alloc regions
5528 // as we may have to copy some 'reachable' referent
5529 // objects (and their reachable sub-graphs) that were
5530 // not copied during the pause.
5531 process_discovered_references();
5532
// Fix up string-deduplication structures after evacuation and record the
// time taken. NOTE(review): the 'true' argument's meaning is not visible
// here -- confirm against G1StringDedup::unlink_or_oops_do.
5533 if (G1StringDedup::is_enabled()) {
5534 double fixup_start = os::elapsedTime();
5535
5536 G1STWIsAliveClosure is_alive(this);
5537 G1KeepAliveClosure keep_alive(this);
5538 G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive, true, phase_times);
5539
5540 double fixup_time_ms = (os::elapsedTime() - fixup_start) * 1000.0;
5541 phase_times->record_string_dedup_fixup_time(fixup_time_ms);
5542 }
5543
// Retire the GC allocation regions and let the remembered set code finish
// its per-pause bookkeeping.
5544 _allocator->release_gc_alloc_regions(evacuation_info);
5545 g1_rem_set()->cleanup_after_oops_into_collection_set_do();
5546
5547 // Reset and re-enable the hot card cache.
5548 // Note the counts for the cards in the regions in the
5549 // collection set are reset when the collection set is freed.
5550 hot_card_cache->reset_hot_cache();
5551 hot_card_cache->set_use_cache(true);
5552
5553 purge_code_root_memory();
5554
// Undo self-forwarding pointers installed for objects that failed to
// evacuate; the EvacuationFailureALot reset is debug-only (NOT_PRODUCT).
5555 if (evacuation_failed()) {
5556 remove_self_forwarding_pointers();
5557
5558 // Reset the G1EvacuationFailureALot counters and flags
5559 // Note: the values are reset only when an actual
5560 // evacuation failure occurs.
5561 NOT_PRODUCT(reset_evacuation_should_fail();)
5562 }
5563
5564 // Enqueue any remaining references remaining on the STW
5565 // reference processor's discovered lists. We need to do
5566 // this after the card table is cleaned (and verified) as
5567 // the act of enqueueing entries on to the pending list
5568 // will log these updates (and dirty their associated
5569 // cards). We need these updates logged to update any
5570 // RSets.
5571 enqueue_discovered_references();
5572
// Re-dirty cards logged during the pause; with C2 present, also update the
// derived pointers recorded in the DerivedPointerTable.
5573 redirty_logged_cards();
5574 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
5575 }
5576
5577 void G1CollectedHeap::free_region(HeapRegion* hr,
5578 FreeRegionList* free_list,
5579 bool par,
5580 bool locked) {
5581 assert(!hr->is_free(), "the region should not be free");
5582 assert(!hr->is_empty(), "the region should not be empty");
5583 assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
5584 assert(free_list != NULL, "pre-condition");
5585
5586 if (G1VerifyBitmaps) {
5587 MemRegion mr(hr->bottom(), hr->end());
5588 concurrent_mark()->clearRangePrevBitmap(mr);
5589 }
5590
5591 // Clear the card counts for this region.
5592 // Note: we only need to do this if the region is not young
5593 // (since we don't refine cards in young regions).
5594 if (!hr->is_young()) {
|
3487
3488 void G1CollectedHeap::print_all_rsets() {
3489 PrintRSetsClosure cl("Printing All RSets");;
3490 heap_region_iterate(&cl);
3491 }
3492 #endif // PRODUCT
3493
3494 G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
3495 YoungList* young_list = heap()->young_list();
3496
3497 size_t eden_used_bytes = young_list->eden_used_bytes();
3498 size_t survivor_used_bytes = young_list->survivor_used_bytes();
3499
3500 size_t eden_capacity_bytes =
3501 (g1_policy()->young_list_target_length() * HeapRegion::GrainBytes) - survivor_used_bytes;
3502
3503 VirtualSpaceSummary heap_summary = create_heap_space_summary();
3504 return G1HeapSummary(heap_summary, used(), eden_used_bytes, eden_capacity_bytes, survivor_used_bytes);
3505 }
3506
3507 G1EvacSummary G1CollectedHeap::create_g1_evac_summary(G1EvacStats* stats) {
3508 return G1EvacSummary(stats->allocated(), stats->wasted(), stats->undo_wasted(),
3509 stats->unused(), stats->used(), stats->region_end_waste(),
3510 stats->regions_filled(), stats->direct_allocated(),
3511 stats->failure_used(), stats->failure_waste());
3512 }
3513
3514 void G1CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
3515 const G1HeapSummary& heap_summary = create_g1_heap_summary();
3516 gc_tracer->report_gc_heap_summary(when, heap_summary);
3517
3518 const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
3519 gc_tracer->report_metaspace_summary(when, metaspace_summary);
3520 }
3521
3522
3523 G1CollectedHeap* G1CollectedHeap::heap() {
3524 CollectedHeap* heap = Universe::heap();
3525 assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
3526 assert(heap->kind() == CollectedHeap::G1CollectedHeap, "Not a G1CollectedHeap");
3527 return (G1CollectedHeap*)heap;
3528 }
3529
// NOTE(review): this block appears to be a corrupted splice -- the embedded
// listing numbers jump from 3533 straight to 5534, and everything after the
// "Fill TLAB's" comment matches a post-evacuation cleanup routine rather than
// a GC prologue. It also uses names (phase_times, evacuation_info,
// hot_card_cache) that no declaration in this scope provides. Reconstruct the
// real gc_prologue body before making functional changes here.
3530 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
3531 // always_do_update_barrier = false;
3532 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
3533 // Fill TLAB's and such
// --- spliced content begins here (listing numbers jump to 55xx) ---
5534 // to do this _before_ we retire the GC alloc regions
5535 // as we may have to copy some 'reachable' referent
5536 // objects (and their reachable sub-graphs) that were
5537 // not copied during the pause.
5538 process_discovered_references();
5539
// Fix up string-deduplication structures after evacuation and record the
// time taken. NOTE(review): the 'true' argument's meaning is not visible
// here -- confirm against G1StringDedup::unlink_or_oops_do.
5540 if (G1StringDedup::is_enabled()) {
5541 double fixup_start = os::elapsedTime();
5542
5543 G1STWIsAliveClosure is_alive(this);
5544 G1KeepAliveClosure keep_alive(this);
5545 G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive, true, phase_times);
5546
5547 double fixup_time_ms = (os::elapsedTime() - fixup_start) * 1000.0;
5548 phase_times->record_string_dedup_fixup_time(fixup_time_ms);
5549 }
5550
// Retire the GC allocation regions, let the remembered set code finish its
// per-pause bookkeeping, then publish the object-copy (PLAB) statistics.
5551 _allocator->release_gc_alloc_regions(evacuation_info);
5552 g1_rem_set()->cleanup_after_oops_into_collection_set_do();
5553
5554 record_obj_copy_mem_stats();
5555
5556 // Reset and re-enable the hot card cache.
5557 // Note the counts for the cards in the regions in the
5558 // collection set are reset when the collection set is freed.
5559 hot_card_cache->reset_hot_cache();
5560 hot_card_cache->set_use_cache(true);
5561
5562 purge_code_root_memory();
5563
// Undo self-forwarding pointers installed for objects that failed to
// evacuate; the EvacuationFailureALot reset is debug-only (NOT_PRODUCT).
5564 if (evacuation_failed()) {
5565 remove_self_forwarding_pointers();
5566
5567 // Reset the G1EvacuationFailureALot counters and flags
5568 // Note: the values are reset only when an actual
5569 // evacuation failure occurs.
5570 NOT_PRODUCT(reset_evacuation_should_fail();)
5571 }
5572
5573 // Enqueue any remaining references remaining on the STW
5574 // reference processor's discovered lists. We need to do
5575 // this after the card table is cleaned (and verified) as
5576 // the act of enqueueing entries on to the pending list
5577 // will log these updates (and dirty their associated
5578 // cards). We need these updates logged to update any
5579 // RSets.
5580 enqueue_discovered_references();
5581
// Re-dirty cards logged during the pause; with C2 present, also update the
// derived pointers recorded in the DerivedPointerTable.
5582 redirty_logged_cards();
5583 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
5584 }
5585
5586 void G1CollectedHeap::record_obj_copy_mem_stats() {
5587 _gc_tracer_stw->report_evacuation_statistics(create_g1_evac_summary(&_survivor_evac_stats),
5588 create_g1_evac_summary(&_old_evac_stats));
5589 }
5590
5591 void G1CollectedHeap::free_region(HeapRegion* hr,
5592 FreeRegionList* free_list,
5593 bool par,
5594 bool locked) {
5595 assert(!hr->is_free(), "the region should not be free");
5596 assert(!hr->is_empty(), "the region should not be empty");
5597 assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
5598 assert(free_list != NULL, "pre-condition");
5599
5600 if (G1VerifyBitmaps) {
5601 MemRegion mr(hr->bottom(), hr->end());
5602 concurrent_mark()->clearRangePrevBitmap(mr);
5603 }
5604
5605 // Clear the card counts for this region.
5606 // Note: we only need to do this if the region is not young
5607 // (since we don't refine cards in young regions).
5608 if (!hr->is_young()) {
|