512 for (uint j = 0; j < _max_num_tasks; ++j) {
513 _tasks[j]->clear_mark_stats_cache(region_idx);
514 }
515 _top_at_rebuild_starts[region_idx] = NULL;
516 _region_mark_stats[region_idx].clear();
517 }
518
519 void G1ConcurrentMark::clear_statistics(HeapRegion* r) {
520 uint const region_idx = r->hrm_index();
521 if (r->is_humongous()) {
522 assert(r->is_starts_humongous(), "Got humongous continues region here");
523 uint const size_in_regions = (uint)_g1h->humongous_obj_size_in_regions(oop(r->humongous_start_region()->bottom())->size());
524 for (uint j = region_idx; j < (region_idx + size_in_regions); j++) {
525 clear_statistics_in_region(j);
526 }
527 } else {
528 clear_statistics_in_region(region_idx);
529 }
530 }
531
532 void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
533 assert_at_safepoint_on_vm_thread();
534
535 // Need to clear mark bit of the humongous object.
536 if (_next_mark_bitmap->is_marked(r->bottom())) {
537 _next_mark_bitmap->clear(r->bottom());
538 }
539
540 if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
541 return;
542 }
543
544 // Clear any statistics about the region gathered so far.
545 clear_statistics(r);
546 }
547
548 void G1ConcurrentMark::reset_marking_for_restart() {
549 _global_mark_stack.set_empty();
550
551 // Expand the marking stack, if we have to and if we can.
552 if (has_overflown()) {
553 _global_mark_stack.expand();
554
555 uint max_regions = _g1h->max_regions();
556 for (uint i = 0; i < max_regions; i++) {
557 _region_mark_stats[i].clear_during_overflow();
558 }
559 }
560
561 clear_has_overflown();
|
512 for (uint j = 0; j < _max_num_tasks; ++j) {
513 _tasks[j]->clear_mark_stats_cache(region_idx);
514 }
515 _top_at_rebuild_starts[region_idx] = NULL;
516 _region_mark_stats[region_idx].clear();
517 }
518
// Clear all per-region marking statistics covering the given region
// (delegating to clear_statistics_in_region per heap region index).
void G1ConcurrentMark::clear_statistics(HeapRegion* r) {
  uint const region_idx = r->hrm_index();
  if (r->is_humongous()) {
    // Only the "starts humongous" region is expected here; the regions the
    // object continues into are derived from its size below.
    assert(r->is_starts_humongous(), "Got humongous continues region here");
    // Number of regions spanned by the humongous object, computed from the
    // object's size at the start of its first region.
    uint const size_in_regions = (uint)_g1h->humongous_obj_size_in_regions(oop(r->humongous_start_region()->bottom())->size());
    for (uint j = region_idx; j < (region_idx + size_in_regions); j++) {
      clear_statistics_in_region(j);
    }
  } else {
    // Regular region: just clear its own statistics.
    clear_statistics_in_region(region_idx);
  }
}
531
532 static void maybe_clear_bitmap_if_set(G1CMBitMap* bitmap, HeapWord* addr) {
533 if (bitmap->is_marked(addr)) {
534 bitmap->clear(addr);
535 }
536 }
537
// Called when a humongous object has been eagerly reclaimed during a young
// collection. Clears any mark bits left for the object and resets the
// marking statistics gathered for its regions.
void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
  assert_at_safepoint_on_vm_thread();

  // Need to clear all mark bits of the humongous object.
  maybe_clear_bitmap_if_set(_prev_mark_bitmap, r->bottom());

  // The next bitmap is touched both while marking/rebuild is in progress and
  // while it is being cleared concurrently.
  G1CollectorState* collector_state = _g1h->collector_state();
  if (collector_state->mark_or_rebuild_in_progress() ||
      collector_state->clearing_next_bitmap()) {
    maybe_clear_bitmap_if_set(_next_mark_bitmap, r->bottom());
  }

  // Clear any statistics about the region gathered so far.
  clear_statistics(r);
}
553
554 void G1ConcurrentMark::reset_marking_for_restart() {
555 _global_mark_stack.set_empty();
556
557 // Expand the marking stack, if we have to and if we can.
558 if (has_overflown()) {
559 _global_mark_stack.expand();
560
561 uint max_regions = _g1h->max_regions();
562 for (uint i = 0; i < max_regions; i++) {
563 _region_mark_stats[i].clear_during_overflow();
564 }
565 }
566
567 clear_has_overflown();
|