954
955 // Temporarily disable pretouching of heap pages. This interface is used
956 // when mmap'ing archived heap data in, so pre-touching is wasted.
957 FlagSetting fs(AlwaysPreTouch, false);
958
959 // Enable archive object checking in G1MarkSweep. We have to let it know
960 // about each archive range, so that objects in those ranges aren't marked.
961 G1MarkSweep::enable_archive_object_check();
962
963 // For each specified MemRegion range, allocate the corresponding G1
964 // regions and mark them as archive regions. We expect the ranges in
965 // ascending starting address order, without overlap.
966 for (size_t i = 0; i < count; i++) {
967 MemRegion curr_range = ranges[i];
968 HeapWord* start_address = curr_range.start();
969 size_t word_size = curr_range.word_size();
970 HeapWord* last_address = curr_range.last();
971 size_t commits = 0;
972
973 guarantee(reserved.contains(start_address) && reserved.contains(last_address),
974 err_msg("MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
975 p2i(start_address), p2i(last_address)));
976 guarantee(start_address > prev_last_addr,
977 err_msg("Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
978 p2i(start_address), p2i(prev_last_addr)));
979 prev_last_addr = last_address;
980
981 // Check for ranges that start in the same G1 region in which the previous
982 // range ended, and adjust the start address so we don't try to allocate
983 // the same region again. If the current range is entirely within that
984 // region, skip it, just adjusting the recorded top.
985 HeapRegion* start_region = _hrm.addr_to_region(start_address);
986 if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
987 start_address = start_region->end();
988 if (start_address > last_address) {
989 increase_used(word_size * HeapWordSize);
990 start_region->set_top(last_address + 1);
991 continue;
992 }
993 start_region->set_top(start_address);
994 curr_range = MemRegion(start_address, last_address + 1);
995 start_region = _hrm.addr_to_region(start_address);
996 }
997
998 // Perform the actual region allocation, exiting if it fails.
1000 if (!_hrm.allocate_containing_regions(curr_range, &commits)) {
1001 return false;
1002 }
1003 increase_used(word_size * HeapWordSize);
1004 if (commits != 0) {
1005 ergo_verbose1(ErgoHeapSizing,
1006 "attempt heap expansion",
1007 ergo_format_reason("allocate archive regions")
1008 ergo_format_byte("total size"),
1009 HeapRegion::GrainWords * HeapWordSize * commits);
1010 }
1011
1012 // Mark each G1 region touched by the range as archive, add it to the old set,
1013 // and set the allocation context and top.
1014 HeapRegion* curr_region = _hrm.addr_to_region(start_address);
1015 HeapRegion* last_region = _hrm.addr_to_region(last_address);
1016 prev_last_region = last_region;
1017
1018 while (curr_region != NULL) {
1019 assert(curr_region->is_empty() && !curr_region->is_pinned(),
1020 err_msg("Region already in use (index %u)", curr_region->hrm_index()));
1021 _hr_printer.alloc(curr_region, G1HRPrinter::Archive);
1022 curr_region->set_allocation_context(AllocationContext::system());
1023 curr_region->set_archive();
1024 _old_set.add(curr_region);
1025 if (curr_region != last_region) {
1026 curr_region->set_top(curr_region->end());
1027 curr_region = _hrm.next_region_in_heap(curr_region);
1028 } else {
1029 curr_region->set_top(last_address + 1);
1030 curr_region = NULL;
1031 }
1032 }
1033
1034 // Notify mark-sweep of the archive range.
1035 G1MarkSweep::set_range_archive(curr_range, true);
1036 }
1037 return true;
1038 }
1039
1040 void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
1041 assert(!is_init_completed(), "Expect to be called at JVM init time");
1042 assert(ranges != NULL, "MemRegion array NULL");
1043 assert(count != 0, "No MemRegions provided");
1044 MemRegion reserved = _hrm.reserved();
1045 HeapWord *prev_last_addr = NULL;
1046 HeapRegion* prev_last_region = NULL;
1047
1048 // For each MemRegion, create filler objects, if needed, in the G1 regions
1049 // that contain the address range. The address range actually within the
1050 // MemRegion will not be modified. That is assumed to have been initialized
1051 // elsewhere, probably via an mmap of archived heap data.
1052 MutexLockerEx x(Heap_lock);
1053 for (size_t i = 0; i < count; i++) {
1054 HeapWord* start_address = ranges[i].start();
1055 HeapWord* last_address = ranges[i].last();
1056
1057 assert(reserved.contains(start_address) && reserved.contains(last_address),
1058 err_msg("MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
1059 p2i(start_address), p2i(last_address)));
1060 assert(start_address > prev_last_addr,
1061 err_msg("Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
1062 p2i(start_address), p2i(prev_last_addr)));
1063
1064 HeapRegion* start_region = _hrm.addr_to_region(start_address);
1065 HeapRegion* last_region = _hrm.addr_to_region(last_address);
1066 HeapWord* bottom_address = start_region->bottom();
1067
1068 // Check for a range beginning in the same region in which the
1069 // previous one ended.
1070 if (start_region == prev_last_region) {
1071 bottom_address = prev_last_addr + 1;
1072 }
1073
1074 // Verify that the regions were all marked as archive regions by
1075 // alloc_archive_regions.
1076 HeapRegion* curr_region = start_region;
1077 while (curr_region != NULL) {
1078 guarantee(curr_region->is_archive(),
1079 err_msg("Expected archive region at index %u", curr_region->hrm_index()));
1080 if (curr_region != last_region) {
1081 curr_region = _hrm.next_region_in_heap(curr_region);
1082 } else {
1083 curr_region = NULL;
1084 }
1085 }
1086
1087 prev_last_addr = last_address;
1088 prev_last_region = last_region;
1089
1090 // Fill the memory below the allocated range with dummy object(s),
1091 // if the region bottom does not match the range start, or if the previous
1092 // range ended within the same G1 region, and there is a gap.
1093 if (start_address != bottom_address) {
1094 size_t fill_size = pointer_delta(start_address, bottom_address);
1095 G1CollectedHeap::fill_with_objects(bottom_address, fill_size);
1096 increase_used(fill_size * HeapWordSize);
1097 }
1098 }
1099 }
1122 }
1123
1124 void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
1125 assert(!is_init_completed(), "Expect to be called at JVM init time");
1126 assert(ranges != NULL, "MemRegion array NULL");
1127 assert(count != 0, "No MemRegions provided");
1128 MemRegion reserved = _hrm.reserved();
1129 HeapWord* prev_last_addr = NULL;
1130 HeapRegion* prev_last_region = NULL;
1131 size_t size_used = 0;
1132 size_t uncommitted_regions = 0;
1133
1134 // For each MemRegion, free the G1 regions that constitute it, and
1135 // notify mark-sweep that the range is no longer to be considered 'archive.'
1136 MutexLockerEx x(Heap_lock);
1137 for (size_t i = 0; i < count; i++) {
1138 HeapWord* start_address = ranges[i].start();
1139 HeapWord* last_address = ranges[i].last();
1140
1141 assert(reserved.contains(start_address) && reserved.contains(last_address),
1142 err_msg("MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
1143 p2i(start_address), p2i(last_address)));
1144 assert(start_address > prev_last_addr,
1145 err_msg("Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
1146 p2i(start_address), p2i(prev_last_addr)));
1147 size_used += ranges[i].byte_size();
1148 prev_last_addr = last_address;
1149
1150 HeapRegion* start_region = _hrm.addr_to_region(start_address);
1151 HeapRegion* last_region = _hrm.addr_to_region(last_address);
1152
1153 // Check for ranges that start in the same G1 region in which the previous
1154 // range ended, and adjust the start address so we don't try to free
1155 // the same region again. If the current range is entirely within that
1156 // region, skip it.
1157 if (start_region == prev_last_region) {
1158 start_address = start_region->end();
1159 if (start_address > last_address) {
1160 continue;
1161 }
1162 start_region = _hrm.addr_to_region(start_address);
1163 }
1164 prev_last_region = last_region;
1165
1166 // After verifying that each region was marked as an archive region by
1167 // alloc_archive_regions, set it free and empty and uncommit it.
1168 HeapRegion* curr_region = start_region;
1169 while (curr_region != NULL) {
1170 guarantee(curr_region->is_archive(),
1171 err_msg("Expected archive region at index %u", curr_region->hrm_index()));
1172 uint curr_index = curr_region->hrm_index();
1173 _old_set.remove(curr_region);
1174 curr_region->set_free();
1175 curr_region->set_top(curr_region->bottom());
1176 if (curr_region != last_region) {
1177 curr_region = _hrm.next_region_in_heap(curr_region);
1178 } else {
1179 curr_region = NULL;
1180 }
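      // Note: shrink_at() uncommits the region at curr_index, so the cursor has
      // already been advanced past it (or set to NULL) before the call.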
1181 _hrm.shrink_at(curr_index, 1);
1182 uncommitted_regions++;
1183 }
1184
1185 // Notify mark-sweep that this is no longer an archive range.
1186 G1MarkSweep::set_range_archive(ranges[i], false);
1187 }
1188
1189 if (uncommitted_regions != 0) {
1190 ergo_verbose1(ErgoHeapSizing,
1191 "attempt heap shrinking",
1738 // 32-bit size_t's.
1739 double used_after_gc_d = (double) used_after_gc;
1740 double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;
1741 double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;
1742
1743 // Let's make sure that they are both under the max heap size, which
1744 // by default will make them fit into a size_t.
1745 double desired_capacity_upper_bound = (double) max_heap_size;
1746 minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
1747 desired_capacity_upper_bound);
1748 maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
1749 desired_capacity_upper_bound);
1750
1751 // We can now safely turn them into size_t's.
1752 size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;
1753 size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;
1754
1755 // This assert only makes sense here, before we adjust them
1756 // with respect to the min and max heap size.
1757 assert(minimum_desired_capacity <= maximum_desired_capacity,
1758 err_msg("minimum_desired_capacity = " SIZE_FORMAT ", "
1759 "maximum_desired_capacity = " SIZE_FORMAT,
1760 minimum_desired_capacity, maximum_desired_capacity));
1761
1762 // Should not be greater than the heap max size. No need to adjust
1763 // it with respect to the heap min size as it's a lower bound (i.e.,
1764 // we'll try to make the capacity larger than it, not smaller).
1765 minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
1766 // Should not be less than the heap min size. No need to adjust it
1767 // with respect to the heap max size as it's an upper bound (i.e.,
1768 // we'll try to make the capacity smaller than it, not greater).
1769 maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size);
1770
1771 if (capacity_after_gc < minimum_desired_capacity) {
1772 // Don't expand unless it's significant
1773 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
1774 ergo_verbose4(ErgoHeapSizing,
1775 "attempt heap expansion",
1776 ergo_format_reason("capacity lower than "
1777 "min desired capacity after Full GC")
1778 ergo_format_byte("capacity")
1779 ergo_format_byte("occupancy")
1780 ergo_format_byte_perc("min desired capacity"),
2522
2523 for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
2524 // Let's use the existing mechanism for the allocation
2525 HeapWord* dummy_obj = humongous_obj_allocate(word_size,
2526 AllocationContext::system());
2527 if (dummy_obj != NULL) {
2528 MemRegion mr(dummy_obj, word_size);
2529 CollectedHeap::fill_with_object(mr);
2530 } else {
2531 // If we can't allocate once, we probably cannot allocate
2532 // again. Let's get out of the loop.
2533 break;
2534 }
2535 }
2536 }
2537 #endif // !PRODUCT
2538
2539 void G1CollectedHeap::increment_old_marking_cycles_started() {
2540 assert(_old_marking_cycles_started == _old_marking_cycles_completed ||
2541 _old_marking_cycles_started == _old_marking_cycles_completed + 1,
2542 err_msg("Wrong marking cycle count (started: %d, completed: %d)",
2543 _old_marking_cycles_started, _old_marking_cycles_completed));
2544
2545 _old_marking_cycles_started++;
2546 }
2547
2548 void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
2549 MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
2550
2551 // We assume that if concurrent == true, then the caller is a
2552 // concurrent thread that has joined the Suspendible Thread
2553 // Set. If there's ever a cheap way to check this, we should add an
2554 // assert here.
2555
2556 // Given that this method is called at the end of a Full GC or of a
2557 // concurrent cycle, and those can be nested (i.e., a Full GC can
2558 // interrupt a concurrent cycle), the number of full collections
2559 // completed should be either one (in the case where there was no
2560 // nesting) or two (when a Full GC interrupted a concurrent cycle)
2561 // behind the number of full collections started.
2562
2563 // This is the case for the inner caller, i.e. a Full GC.
2564 assert(concurrent ||
2565 (_old_marking_cycles_started == _old_marking_cycles_completed + 1) ||
2566 (_old_marking_cycles_started == _old_marking_cycles_completed + 2),
2567 err_msg("for inner caller (Full GC): _old_marking_cycles_started = %u "
2568 "is inconsistent with _old_marking_cycles_completed = %u",
2569 _old_marking_cycles_started, _old_marking_cycles_completed));
2570
2571 // This is the case for the outer caller, i.e. the concurrent cycle.
2572 assert(!concurrent ||
2573 (_old_marking_cycles_started == _old_marking_cycles_completed + 1),
2574 err_msg("for outer caller (concurrent cycle): "
2575 "_old_marking_cycles_started = %u "
2576 "is inconsistent with _old_marking_cycles_completed = %u",
2577 _old_marking_cycles_started, _old_marking_cycles_completed));
2578
2579 _old_marking_cycles_completed += 1;
2580
2581 // We need to clear the "in_progress" flag in the CM thread before
2582 // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
2583 // is set) so that if a waiter requests another System.gc() it doesn't
2584 // incorrectly see that a marking cycle is still in progress.
2585 if (concurrent) {
2586 _cmThread->set_idle();
2587 }
2588
2589 // This notify_all() will ensure that a thread that called
2590 // System.gc() (with ExplicitGCInvokesConcurrent set or not)
2591 // and is waiting for a full GC to finish will be woken up. It is
2592 // waiting in VM_G1IncCollectionPause::doit_epilogue().
2593 FullGCCount_lock->notify_all();
2594 }
2595
2596 void G1CollectedHeap::register_concurrent_cycle_start(const Ticks& start_time) {
2597 collector_state()->set_concurrent_cycle_started(true);
3107 public:
3108 YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {}
3109 void do_oop(oop* p) { if (_g1h->is_in_young(*p)) { _count++; } }
3110 void do_oop(narrowOop* p) { ShouldNotReachHere(); }
3111
3112 int count() { return _count; }
3113 void reset_count() { _count = 0; };
3114 };
3115
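// Verifies that a Klass whose embedded oops reference young-generation objects
// is marked as having modified oops (i.e. is dirty).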
3116 class VerifyKlassClosure: public KlassClosure {
3117 YoungRefCounterClosure _young_ref_counter_closure;
3118 OopClosure *_oop_closure;
3119 public:
3120 VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
3121 void do_klass(Klass* k) {
3122 k->oops_do(_oop_closure);
3123
3124 _young_ref_counter_closure.reset_count();
3125 k->oops_do(&_young_ref_counter_closure);
3126 if (_young_ref_counter_closure.count() > 0) {
3127 guarantee(k->has_modified_oops(), err_msg("Klass " PTR_FORMAT " has young refs but is not dirty.", p2i(k)));
3128 }
3129 }
3130 };
3131
3132 class VerifyLivenessOopClosure: public OopClosure {
3133 G1CollectedHeap* _g1h;
3134 VerifyOption _vo;
3135 public:
3136 VerifyLivenessOopClosure(G1CollectedHeap* g1h, VerifyOption vo):
3137 _g1h(g1h), _vo(vo)
3138 { }
3139 void do_oop(narrowOop *p) { do_oop_work(p); }
3140 void do_oop( oop *p) { do_oop_work(p); }
3141
3142 template <class T> void do_oop_work(T *p) {
3143 oop obj = oopDesc::load_decode_heap_oop(p);
3144 guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo),
3145 "Dead object referenced by a not dead object");
3146 }
3147 };
3177
3178 o->oop_iterate_no_header(&isLive);
3179 if (!_hr->obj_allocated_since_prev_marking(o)) {
3180 size_t obj_size = o->size(); // Make sure we don't overflow
3181 _live_bytes += (obj_size * HeapWordSize);
3182 }
3183 }
3184 }
3185 size_t live_bytes() { return _live_bytes; }
3186 };
3187
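// Verifies that every reference found in an archive object is either NULL or
// points into an archive range.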
3188 class VerifyArchiveOopClosure: public OopClosure {
3189 public:
3190 VerifyArchiveOopClosure(HeapRegion *hr) { }
3191 void do_oop(narrowOop *p) { do_oop_work(p); }
3192 void do_oop( oop *p) { do_oop_work(p); }
3193
3194 template <class T> void do_oop_work(T *p) {
3195 oop obj = oopDesc::load_decode_heap_oop(p);
3196 guarantee(obj == NULL || G1MarkSweep::in_archive_range(obj),
3197 err_msg("Archive object at " PTR_FORMAT " references a non-archive object at " PTR_FORMAT,
3198 p2i(p), p2i(obj)));
3199 }
3200 };
3201
3202 class VerifyArchiveRegionClosure: public ObjectClosure {
3203 public:
3204 VerifyArchiveRegionClosure(HeapRegion *hr) { }
3205 // Verify that all object pointers are to archive regions.
3206 void do_object(oop o) {
3207 VerifyArchiveOopClosure checkOop(NULL);
3208 assert(o != NULL, "Should not be here for NULL oops");
3209 o->oop_iterate_no_header(&checkOop);
3210 }
3211 };
3212
3213 class VerifyRegionClosure: public HeapRegionClosure {
3214 private:
3215 bool _par;
3216 VerifyOption _vo;
3217 bool _failures;
3218 public:
4692
4693 public:
4694 G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
4695 AbstractGangTask("String/Symbol Unlinking"),
4696 _is_alive(is_alive),
4697 _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
4698 _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
4699
4700 _initial_string_table_size = StringTable::the_table()->table_size();
4701 _initial_symbol_table_size = SymbolTable::the_table()->table_size();
4702 if (process_strings) {
4703 StringTable::clear_parallel_claimed_index();
4704 }
4705 if (process_symbols) {
4706 SymbolTable::clear_parallel_claimed_index();
4707 }
4708 }
4709
4710 ~G1StringSymbolTableUnlinkTask() {
4711 guarantee(!_process_strings || StringTable::parallel_claimed_index() >= _initial_string_table_size,
4712 err_msg("claim value %d after unlink less than initial string table size %d",
4713 StringTable::parallel_claimed_index(), _initial_string_table_size));
4714 guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
4715 err_msg("claim value %d after unlink less than initial symbol table size %d",
4716 SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));
4717
4718 if (G1TraceStringSymbolTableScrubbing) {
4719 gclog_or_tty->print_cr("Cleaned string and symbol table, "
4720 "strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, "
4721 "symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",
4722 strings_processed(), strings_removed(),
4723 symbols_processed(), symbols_removed());
4724 }
4725 }
4726
4727 void work(uint worker_id) {
4728 int strings_processed = 0;
4729 int strings_removed = 0;
4730 int symbols_processed = 0;
4731 int symbols_removed = 0;
4732 if (_process_strings) {
4733 StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
4734 Atomic::add(strings_processed, &_strings_processed);
4735 Atomic::add(strings_removed, &_strings_removed);
4736 }
5096 // Non Copying Keep Alive closure
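// References into the collection set are updated to the object's forwardee;
// references to humongous objects outside the collection set just record the
// humongous object as live.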
5097 class G1KeepAliveClosure: public OopClosure {
5098 G1CollectedHeap* _g1;
5099 public:
5100 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
5101 void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
5102 void do_oop(oop* p) {
5103 oop obj = *p;
5104 assert(obj != NULL, "the caller should have filtered out NULL values");
5105
5106 const InCSetState cset_state = _g1->in_cset_state(obj);
5107 if (!cset_state.is_in_cset_or_humongous()) {
5108 return;
5109 }
5110 if (cset_state.is_in_cset()) {
5111 assert( obj->is_forwarded(), "invariant" );
5112 *p = obj->forwardee();
5113 } else {
5114 assert(!obj->is_forwarded(), "invariant" );
5115 assert(cset_state.is_humongous(),
5116 err_msg("Only allowed InCSet state is IsHumongous, but is %d", cset_state.value()));
5117 _g1->set_humongous_is_live(obj);
5118 }
5119 }
5120 };
5121
5122 // Copying Keep Alive closure - can be called from both
5123 // serial and parallel code as long as different worker
5124 // threads utilize different G1ParScanThreadState instances
5125 // and different queues.
5126
5127 class G1CopyingKeepAliveClosure: public OopClosure {
5128 G1CollectedHeap* _g1h;
5129 OopClosure* _copy_non_heap_obj_cl;
5130 G1ParScanThreadState* _par_scan_state;
5131
5132 public:
5133 G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
5134 OopClosure* non_heap_obj_cl,
5135 G1ParScanThreadState* pss):
5136 _g1h(g1h),
5150 // evacuation failure) then we need to update the reference
5151 // field and, if both reference and referent are in the G1
5152 // heap, update the RSet for the referent.
5153 //
5154 // If the referent has not been forwarded then we have to keep
5155 // it alive by policy. Therefore we have to copy the referent.
5156 //
5157 // If the reference field is in the G1 heap then we can push
5158 // on the PSS queue. When the queue is drained (after each
5159 // phase of reference processing) the object and its followers
5160 // will be copied, the reference field set to point to the
5161 // new location, and the RSet updated. Otherwise we need to
5162 // use the non-heap or metadata closures directly to copy
5163 // the referent object and update the pointer, while avoiding
5164 // updating the RSet.
5165
5166 if (_g1h->is_in_g1_reserved(p)) {
5167 _par_scan_state->push_on_queue(p);
5168 } else {
5169 assert(!Metaspace::contains((const void*)p),
5170 err_msg("Unexpectedly found a pointer from metadata: " PTR_FORMAT, p2i(p)));
5171 _copy_non_heap_obj_cl->do_oop(p);
5172 }
5173 }
5174 }
5175 };
5176
5177 // Serial drain queue closure. Called as the 'complete_gc'
5178 // closure for each discovered list in some of the
5179 // reference processing phases.
5180
5181 class G1STWDrainQueueClosure: public VoidClosure {
5182 protected:
5183 G1CollectedHeap* _g1h;
5184 G1ParScanThreadState* _par_scan_state;
5185
5186 G1ParScanThreadState* par_scan_state() { return _par_scan_state; }
5187
5188 public:
5189 G1STWDrainQueueClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) :
5190 _g1h(g1h),
5806 ct_bs->verify_g1_young_region(mr);
5807 } else {
5808 ct_bs->verify_dirty_region(mr);
5809 }
5810 }
5811
5812 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
5813 G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
5814 for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
5815 verify_dirty_region(hr);
5816 }
5817 }
5818
5819 void G1CollectedHeap::verify_dirty_young_regions() {
5820 verify_dirty_young_list(_young_list->first_region());
5821 }
5822
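// Returns true if the given bitmap has no bits set in [tams, end); otherwise
// prints the first marked address found above tams and returns false.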
5823 bool G1CollectedHeap::verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
5824 HeapWord* tams, HeapWord* end) {
5825 guarantee(tams <= end,
5826 err_msg("tams: " PTR_FORMAT " end: " PTR_FORMAT, p2i(tams), p2i(end)));
5827 HeapWord* result = bitmap->getNextMarkedWordAddress(tams, end);
5828 if (result < end) {
5829 gclog_or_tty->cr();
5830 gclog_or_tty->print_cr("## wrong marked address on %s bitmap: " PTR_FORMAT,
5831 bitmap_name, p2i(result));
5832 gclog_or_tty->print_cr("## %s tams: " PTR_FORMAT " end: " PTR_FORMAT,
5833 bitmap_name, p2i(tams), p2i(end));
5834 return false;
5835 }
5836 return true;
5837 }
5838
5839 bool G1CollectedHeap::verify_bitmaps(const char* caller, HeapRegion* hr) {
5840 CMBitMapRO* prev_bitmap = concurrent_mark()->prevMarkBitMap();
5841 CMBitMapRO* next_bitmap = (CMBitMapRO*) concurrent_mark()->nextMarkBitMap();
5842
5843 HeapWord* bottom = hr->bottom();
5844 HeapWord* ptams = hr->prev_top_at_mark_start();
5845 HeapWord* ntams = hr->next_top_at_mark_start();
5846 HeapWord* end = hr->end();
6157 !r->rem_set()->is_empty()) {
6158
6159 if (G1TraceEagerReclaimHumongousObjects) {
6160 gclog_or_tty->print_cr("Live humongous region %u size " SIZE_FORMAT " start " PTR_FORMAT " length %u with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
6161 region_idx,
6162 (size_t)obj->size() * HeapWordSize,
6163 p2i(r->bottom()),
6164 r->region_num(),
6165 r->rem_set()->occupied(),
6166 r->rem_set()->strong_code_roots_list_length(),
6167 next_bitmap->isMarked(r->bottom()),
6168 g1h->is_humongous_reclaim_candidate(region_idx),
6169 obj->is_typeArray()
6170 );
6171 }
6172
6173 return false;
6174 }
6175
6176 guarantee(obj->is_typeArray(),
6177 err_msg("Only eagerly reclaiming type arrays is supported, but the object "
6178 PTR_FORMAT " is not.",
6179 p2i(r->bottom())));
6180
6181 if (G1TraceEagerReclaimHumongousObjects) {
6182 gclog_or_tty->print_cr("Dead humongous region %u size " SIZE_FORMAT " start " PTR_FORMAT " length %u with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
6183 region_idx,
6184 (size_t)obj->size() * HeapWordSize,
6185 p2i(r->bottom()),
6186 r->region_num(),
6187 r->rem_set()->occupied(),
6188 r->rem_set()->strong_code_roots_list_length(),
6189 next_bitmap->isMarked(r->bottom()),
6190 g1h->is_humongous_reclaim_candidate(region_idx),
6191 obj->is_typeArray()
6192 );
6193 }
6194 // Need to clear mark bit of the humongous object if already set.
6195 if (next_bitmap->isMarked(r->bottom())) {
6196 next_bitmap->clear(r->bottom());
6197 }
6198 _freed_bytes += r->used();
6199 r->set_containing_set(NULL);
6388 assert_at_safepoint(true /* should_be_vm_thread */);
6389
6390 if (!free_list_only) {
6391 TearDownRegionSetsClosure cl(&_old_set);
6392 heap_region_iterate(&cl);
6393
6394 // Note that emptying the _young_list is postponed and instead done as
6395 // the first step when rebuilding the regions sets again. The reason for
6396 // this is that during a full GC string deduplication needs to know if
6397 // a collected region was young or old when the full GC was initiated.
6398 }
6399 _hrm.remove_all_free_regions();
6400 }
6401
6402 void G1CollectedHeap::increase_used(size_t bytes) {
6403 _summary_bytes_used += bytes;
6404 }
6405
6406 void G1CollectedHeap::decrease_used(size_t bytes) {
6407 assert(_summary_bytes_used >= bytes,
6408 err_msg("invariant: _summary_bytes_used: " SIZE_FORMAT " should be >= bytes: " SIZE_FORMAT,
6409 _summary_bytes_used, bytes));
6410 _summary_bytes_used -= bytes;
6411 }
6412
6413 void G1CollectedHeap::set_used(size_t bytes) {
6414 _summary_bytes_used = bytes;
6415 }
6416
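// Used by rebuild_region_sets() below to re-populate the region sets after they
// have been torn down; expects the free list to be empty on entry.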
6417 class RebuildRegionSetsClosure : public HeapRegionClosure {
6418 private:
6419 bool _free_list_only;
6420 HeapRegionSet* _old_set;
6421 HeapRegionManager* _hrm;
6422 size_t _total_used;
6423
6424 public:
6425 RebuildRegionSetsClosure(bool free_list_only,
6426 HeapRegionSet* old_set, HeapRegionManager* hrm) :
6427 _free_list_only(free_list_only),
6428 _old_set(old_set), _hrm(hrm), _total_used(0) {
6429 assert(_hrm->num_free_regions() == 0, "pre-condition");
6471 }
6472 };
6473
6474 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
6475 assert_at_safepoint(true /* should_be_vm_thread */);
6476
6477 if (!free_list_only) {
6478 _young_list->empty_list();
6479 }
6480
6481 RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
6482 heap_region_iterate(&cl);
6483
6484 if (!free_list_only) {
6485 set_used(cl.total_used());
6486 if (_archive_allocator != NULL) {
6487 _archive_allocator->clear_used();
6488 }
6489 }
6490 assert(used_unlocked() == recalculate_used(),
6491 err_msg("inconsistent used_unlocked(), "
6492 "value: " SIZE_FORMAT " recalculated: " SIZE_FORMAT,
6493 used_unlocked(), recalculate_used()));
6494 }
6495
6496 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
6497 _refine_cte_cl->set_concurrent(concurrent);
6498 }
6499
6500 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
6501 HeapRegion* hr = heap_region_containing(p);
6502 return hr->is_in(p);
6503 }
6504
6505 // Methods for the mutator alloc region
6506
6507 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
6508 bool force) {
6509 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6510 assert(!force || g1_policy()->can_expand_young_list(),
6511 "if force is true we should be able to expand the young list");
6512 bool young_list_full = g1_policy()->is_young_list_full();
6513 if (force || !young_list_full) {
6614
6615 public:
6616 HeapRegionSetCount _old_count;
6617 HeapRegionSetCount _humongous_count;
6618 HeapRegionSetCount _free_count;
6619
6620 VerifyRegionListsClosure(HeapRegionSet* old_set,
6621 HeapRegionSet* humongous_set,
6622 HeapRegionManager* hrm) :
6623 _old_set(old_set), _humongous_set(humongous_set), _hrm(hrm),
6624 _old_count(), _humongous_count(), _free_count(){ }
6625
6626 bool doHeapRegion(HeapRegion* hr) {
6627 if (hr->is_continues_humongous()) {
6628 return false;
6629 }
6630
6631 if (hr->is_young()) {
6632 // TODO
6633 } else if (hr->is_starts_humongous()) {
6634 assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->hrm_index()));
6635 _humongous_count.increment(1u, hr->capacity());
6636 } else if (hr->is_empty()) {
6637 assert(_hrm->is_free(hr), err_msg("Heap region %u is empty but not on the free list.", hr->hrm_index()));
6638 _free_count.increment(1u, hr->capacity());
6639 } else if (hr->is_old()) {
6640 assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrm_index()));
6641 _old_count.increment(1u, hr->capacity());
6642 } else {
6643 // There are no other valid region types. Check for one invalid
6644 // one we can identify: pinned without old or humongous set.
6645 assert(!hr->is_pinned(), err_msg("Heap region %u is pinned but not old (archive) or humongous.", hr->hrm_index()));
6646 ShouldNotReachHere();
6647 }
6648 return false;
6649 }
6650
6651 void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
6652 guarantee(old_set->length() == _old_count.length(), err_msg("Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length()));
6653 guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), err_msg("Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
6654 old_set->total_capacity_bytes(), _old_count.capacity()));
6655
6656 guarantee(humongous_set->length() == _humongous_count.length(), err_msg("Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count.length()));
6657 guarantee(humongous_set->total_capacity_bytes() == _humongous_count.capacity(), err_msg("Hum set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
6658 humongous_set->total_capacity_bytes(), _humongous_count.capacity()));
6659
6660 guarantee(free_list->num_free_regions() == _free_count.length(), err_msg("Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count.length()));
6661 guarantee(free_list->total_capacity_bytes() == _free_count.capacity(), err_msg("Free list capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
6662 free_list->total_capacity_bytes(), _free_count.capacity()));
6663 }
6664 };
6665
6666 void G1CollectedHeap::verify_region_sets() {
6667 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6668
6669 // First, check the explicit lists.
6670 _hrm.verify();
6671 {
6672 // Given that a concurrent operation might be adding regions to
6673 // the secondary free list we have to take the lock before
6674 // verifying it.
6675 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
6676 _secondary_free_list.verify_list();
6677 }
6678
6679 // If a concurrent region freeing operation is in progress it will
6680 // be difficult to correctly attribute any free regions we come
6681 // across to the correct free list given that they might belong to
6682 // one of several (free_list, secondary_free_list, any local lists,
6698 // Finally, make sure that the region accounting in the lists is
6699 // consistent with what we see in the heap.
6700
6701 VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_hrm);
6702 heap_region_iterate(&cl);
6703 cl.verify_counts(&_old_set, &_humongous_set, &_hrm);
6704 }
6705
6706 // Optimized nmethod scanning
6707
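// For each non-NULL oop embedded in the nmethod, adds the nmethod to the strong
// code root list of the region containing the referenced object.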
6708 class RegisterNMethodOopClosure: public OopClosure {
6709 G1CollectedHeap* _g1h;
6710 nmethod* _nm;
6711
6712 template <class T> void do_oop_work(T* p) {
6713 T heap_oop = oopDesc::load_heap_oop(p);
6714 if (!oopDesc::is_null(heap_oop)) {
6715 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
6716 HeapRegion* hr = _g1h->heap_region_containing(obj);
6717 assert(!hr->is_continues_humongous(),
6718 err_msg("trying to add code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
6719 " starting at " HR_FORMAT,
6720 p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
6721
6722 // HeapRegion::add_strong_code_root_locked() avoids adding duplicate entries.
6723 hr->add_strong_code_root_locked(_nm);
6724 }
6725 }
6726
6727 public:
6728 RegisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
6729 _g1h(g1h), _nm(nm) {}
6730
6731 void do_oop(oop* p) { do_oop_work(p); }
6732 void do_oop(narrowOop* p) { do_oop_work(p); }
6733 };
6734
6735 class UnregisterNMethodOopClosure: public OopClosure {
6736 G1CollectedHeap* _g1h;
6737 nmethod* _nm;
6738
6739 template <class T> void do_oop_work(T* p) {
6740 T heap_oop = oopDesc::load_heap_oop(p);
6741 if (!oopDesc::is_null(heap_oop)) {
6742 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
6743 HeapRegion* hr = _g1h->heap_region_containing(obj);
6744 assert(!hr->is_continues_humongous(),
6745 err_msg("trying to remove code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
6746 " starting at " HR_FORMAT,
6747 p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
6748
6749 hr->remove_strong_code_root(_nm);
6750 }
6751 }
6752
6753 public:
6754 UnregisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
6755 _g1h(g1h), _nm(nm) {}
6756
6757 void do_oop(oop* p) { do_oop_work(p); }
6758 void do_oop(narrowOop* p) { do_oop_work(p); }
6759 };
6760
6761 void G1CollectedHeap::register_nmethod(nmethod* nm) {
6762 CollectedHeap::register_nmethod(nm);
6763
6764 guarantee(nm != NULL, "sanity");
6765 RegisterNMethodOopClosure reg_cl(this, nm);
6766 nm->oops_do(&reg_cl);
6767 }
|
954
955 // Temporarily disable pretouching of heap pages. This interface is used
956 // when mmap'ing archived heap data in, so pre-touching is wasted.
957 FlagSetting fs(AlwaysPreTouch, false);
958
959 // Enable archive object checking in G1MarkSweep. We have to let it know
960 // about each archive range, so that objects in those ranges aren't marked.
961 G1MarkSweep::enable_archive_object_check();
962
963 // For each specified MemRegion range, allocate the corresponding G1
964 // regions and mark them as archive regions. We expect the ranges in
965 // ascending starting address order, without overlap.
966 for (size_t i = 0; i < count; i++) {
967 MemRegion curr_range = ranges[i];
968 HeapWord* start_address = curr_range.start();
969 size_t word_size = curr_range.word_size();
970 HeapWord* last_address = curr_range.last();
971 size_t commits = 0;
972
973 guarantee(reserved.contains(start_address) && reserved.contains(last_address),
974 "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
975 p2i(start_address), p2i(last_address));
976 guarantee(start_address > prev_last_addr,
977 "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
978 p2i(start_address), p2i(prev_last_addr));
979 prev_last_addr = last_address;
980
981 // Check for ranges that start in the same G1 region in which the previous
982 // range ended, and adjust the start address so we don't try to allocate
983 // the same region again. If the current range is entirely within that
984 // region, skip it, just adjusting the recorded top.
985 HeapRegion* start_region = _hrm.addr_to_region(start_address);
986 if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
987 start_address = start_region->end();
988 if (start_address > last_address) {
989 increase_used(word_size * HeapWordSize);
990 start_region->set_top(last_address + 1);
991 continue;
992 }
993 start_region->set_top(start_address);
994 curr_range = MemRegion(start_address, last_address + 1);
995 start_region = _hrm.addr_to_region(start_address);
996 }
997
998 // Perform the actual region allocation, exiting if it fails.
1000 if (!_hrm.allocate_containing_regions(curr_range, &commits)) {
1001 return false;
1002 }
1003 increase_used(word_size * HeapWordSize);
1004 if (commits != 0) {
1005 ergo_verbose1(ErgoHeapSizing,
1006 "attempt heap expansion",
1007 ergo_format_reason("allocate archive regions")
1008 ergo_format_byte("total size"),
1009 HeapRegion::GrainWords * HeapWordSize * commits);
1010 }
1011
1012 // Mark each G1 region touched by the range as archive, add it to the old set,
1013 // and set the allocation context and top.
1014 HeapRegion* curr_region = _hrm.addr_to_region(start_address);
1015 HeapRegion* last_region = _hrm.addr_to_region(last_address);
1016 prev_last_region = last_region;
1017
1018 while (curr_region != NULL) {
1019 assert(curr_region->is_empty() && !curr_region->is_pinned(),
1020 "Region already in use (index %u)", curr_region->hrm_index());
1021 _hr_printer.alloc(curr_region, G1HRPrinter::Archive);
1022 curr_region->set_allocation_context(AllocationContext::system());
1023 curr_region->set_archive();
1024 _old_set.add(curr_region);
1025 if (curr_region != last_region) {
1026 curr_region->set_top(curr_region->end());
1027 curr_region = _hrm.next_region_in_heap(curr_region);
1028 } else {
1029 curr_region->set_top(last_address + 1);
1030 curr_region = NULL;
1031 }
1032 }
1033
1034 // Notify mark-sweep of the archive range.
1035 G1MarkSweep::set_range_archive(curr_range, true);
1036 }
1037 return true;
1038 }
1039
1040 void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
1041 assert(!is_init_completed(), "Expect to be called at JVM init time");
1042 assert(ranges != NULL, "MemRegion array NULL");
1043 assert(count != 0, "No MemRegions provided");
1044 MemRegion reserved = _hrm.reserved();
1045 HeapWord *prev_last_addr = NULL;
1046 HeapRegion* prev_last_region = NULL;
1047
1048 // For each MemRegion, create filler objects, if needed, in the G1 regions
1049 // that contain the address range. The address range actually within the
1050 // MemRegion will not be modified. That is assumed to have been initialized
1051 // elsewhere, probably via an mmap of archived heap data.
1052 MutexLockerEx x(Heap_lock);
1053 for (size_t i = 0; i < count; i++) {
1054 HeapWord* start_address = ranges[i].start();
1055 HeapWord* last_address = ranges[i].last();
1056
1057 assert(reserved.contains(start_address) && reserved.contains(last_address),
1058 "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
1059 p2i(start_address), p2i(last_address));
1060 assert(start_address > prev_last_addr,
1061 "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
1062 p2i(start_address), p2i(prev_last_addr));
1063
1064 HeapRegion* start_region = _hrm.addr_to_region(start_address);
1065 HeapRegion* last_region = _hrm.addr_to_region(last_address);
1066 HeapWord* bottom_address = start_region->bottom();
1067
1068 // Check for a range beginning in the same region in which the
1069 // previous one ended.
1070 if (start_region == prev_last_region) {
1071 bottom_address = prev_last_addr + 1;
1072 }
1073
1074 // Verify that the regions were all marked as archive regions by
1075 // alloc_archive_regions.
1076 HeapRegion* curr_region = start_region;
1077 while (curr_region != NULL) {
1078 guarantee(curr_region->is_archive(),
1079 "Expected archive region at index %u", curr_region->hrm_index());
1080 if (curr_region != last_region) {
1081 curr_region = _hrm.next_region_in_heap(curr_region);
1082 } else {
1083 curr_region = NULL;
1084 }
1085 }
1086
1087 prev_last_addr = last_address;
1088 prev_last_region = last_region;
1089
1090 // Fill the memory below the allocated range with dummy object(s),
1091 // if the region bottom does not match the range start, or if the previous
1092 // range ended within the same G1 region, and there is a gap.
1093 if (start_address != bottom_address) {
1094 size_t fill_size = pointer_delta(start_address, bottom_address);
1095 G1CollectedHeap::fill_with_objects(bottom_address, fill_size);
1096 increase_used(fill_size * HeapWordSize);
1097 }
1098 }
1099 }
1122 }
1123
1124 void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
1125 assert(!is_init_completed(), "Expect to be called at JVM init time");
1126 assert(ranges != NULL, "MemRegion array NULL");
1127 assert(count != 0, "No MemRegions provided");
1128 MemRegion reserved = _hrm.reserved();
1129 HeapWord* prev_last_addr = NULL;
1130 HeapRegion* prev_last_region = NULL;
1131 size_t size_used = 0;
1132 size_t uncommitted_regions = 0;
1133
1134 // For each MemRegion, free the G1 regions that constitute it, and
1135 // notify mark-sweep that the range is no longer to be considered 'archive.'
1136 MutexLockerEx x(Heap_lock);
1137 for (size_t i = 0; i < count; i++) {
1138 HeapWord* start_address = ranges[i].start();
1139 HeapWord* last_address = ranges[i].last();
1140
1141 assert(reserved.contains(start_address) && reserved.contains(last_address),
1142 "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
1143 p2i(start_address), p2i(last_address));
1144 assert(start_address > prev_last_addr,
1145 "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
1146 p2i(start_address), p2i(prev_last_addr));
1147 size_used += ranges[i].byte_size();
1148 prev_last_addr = last_address;
1149
1150 HeapRegion* start_region = _hrm.addr_to_region(start_address);
1151 HeapRegion* last_region = _hrm.addr_to_region(last_address);
1152
1153 // Check for ranges that start in the same G1 region in which the previous
1154 // range ended, and adjust the start address so we don't try to free
1155 // the same region again. If the current range is entirely within that
1156 // region, skip it.
1157 if (start_region == prev_last_region) {
1158 start_address = start_region->end();
1159 if (start_address > last_address) {
1160 continue;
1161 }
1162 start_region = _hrm.addr_to_region(start_address);
1163 }
1164 prev_last_region = last_region;
1165
1166 // After verifying that each region was marked as an archive region by
1167 // alloc_archive_regions, set it free and empty and uncommit it.
1168 HeapRegion* curr_region = start_region;
1169 while (curr_region != NULL) {
1170 guarantee(curr_region->is_archive(),
1171 "Expected archive region at index %u", curr_region->hrm_index());
1172 uint curr_index = curr_region->hrm_index();
1173 _old_set.remove(curr_region);
1174 curr_region->set_free();
1175 curr_region->set_top(curr_region->bottom());
1176 if (curr_region != last_region) {
1177 curr_region = _hrm.next_region_in_heap(curr_region);
1178 } else {
1179 curr_region = NULL;
1180 }
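      // Note: shrink_at() uncommits the region at curr_index, so the cursor has
      // already been advanced past it (or set to NULL) before the call.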
1181 _hrm.shrink_at(curr_index, 1);
1182 uncommitted_regions++;
1183 }
1184
1185 // Notify mark-sweep that this is no longer an archive range.
1186 G1MarkSweep::set_range_archive(ranges[i], false);
1187 }
1188
1189 if (uncommitted_regions != 0) {
1190 ergo_verbose1(ErgoHeapSizing,
1191 "attempt heap shrinking",
1738 // 32-bit size_t's.
1739 double used_after_gc_d = (double) used_after_gc;
1740 double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;
1741 double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;
1742
1743 // Let's make sure that they are both under the max heap size, which
1744 // by default will make them fit into a size_t.
1745 double desired_capacity_upper_bound = (double) max_heap_size;
1746 minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
1747 desired_capacity_upper_bound);
1748 maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
1749 desired_capacity_upper_bound);
1750
1751 // We can now safely turn them into size_t's.
1752 size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;
1753 size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;
1754
1755 // This assert only makes sense here, before we adjust them
1756 // with respect to the min and max heap size.
1757 assert(minimum_desired_capacity <= maximum_desired_capacity,
1758 "minimum_desired_capacity = " SIZE_FORMAT ", "
1759 "maximum_desired_capacity = " SIZE_FORMAT,
1760 minimum_desired_capacity, maximum_desired_capacity);
1761
1762 // Should not be greater than the heap max size. No need to adjust
1763 // it with respect to the heap min size as it's a lower bound (i.e.,
1764 // we'll try to make the capacity larger than it, not smaller).
1765 minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
1766 // Should not be less than the heap min size. No need to adjust it
1767 // with respect to the heap max size as it's an upper bound (i.e.,
1768 // we'll try to make the capacity smaller than it, not greater).
1769 maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size);
1770
1771 if (capacity_after_gc < minimum_desired_capacity) {
1772 // Don't expand unless it's significant
1773 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
1774 ergo_verbose4(ErgoHeapSizing,
1775 "attempt heap expansion",
1776 ergo_format_reason("capacity lower than "
1777 "min desired capacity after Full GC")
1778 ergo_format_byte("capacity")
1779 ergo_format_byte("occupancy")
1780 ergo_format_byte_perc("min desired capacity"),
2522
2523 for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
2524 // Let's use the existing mechanism for the allocation
2525 HeapWord* dummy_obj = humongous_obj_allocate(word_size,
2526 AllocationContext::system());
2527 if (dummy_obj != NULL) {
2528 MemRegion mr(dummy_obj, word_size);
2529 CollectedHeap::fill_with_object(mr);
2530 } else {
2531 // If we can't allocate once, we probably cannot allocate
2532 // again. Let's get out of the loop.
2533 break;
2534 }
2535 }
2536 }
2537 #endif // !PRODUCT
2538
2539 void G1CollectedHeap::increment_old_marking_cycles_started() {
2540 assert(_old_marking_cycles_started == _old_marking_cycles_completed ||
2541 _old_marking_cycles_started == _old_marking_cycles_completed + 1,
2542 "Wrong marking cycle count (started: %d, completed: %d)",
2543 _old_marking_cycles_started, _old_marking_cycles_completed);
2544
2545 _old_marking_cycles_started++;
2546 }
2547
2548 void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
2549 MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
2550
2551 // We assume that if concurrent == true, then the caller is a
2552 // concurrent thread that has joined the Suspendible Thread
2553 // Set. If there's ever a cheap way to check this, we should add an
2554 // assert here.
2555
2556 // Given that this method is called at the end of a Full GC or of a
2557 // concurrent cycle, and those can be nested (i.e., a Full GC can
2558 // interrupt a concurrent cycle), the number of full collections
2559 // completed should be either one (in the case where there was no
2560 // nesting) or two (when a Full GC interrupted a concurrent cycle)
2561 // behind the number of full collections started.
2562
2563 // This is the case for the inner caller, i.e. a Full GC.
2564 assert(concurrent ||
2565 (_old_marking_cycles_started == _old_marking_cycles_completed + 1) ||
2566 (_old_marking_cycles_started == _old_marking_cycles_completed + 2),
2567 "for inner caller (Full GC): _old_marking_cycles_started = %u "
2568 "is inconsistent with _old_marking_cycles_completed = %u",
2569 _old_marking_cycles_started, _old_marking_cycles_completed);
2570
2571 // This is the case for the outer caller, i.e. the concurrent cycle.
2572 assert(!concurrent ||
2573 (_old_marking_cycles_started == _old_marking_cycles_completed + 1),
2574 "for outer caller (concurrent cycle): "
2575 "_old_marking_cycles_started = %u "
2576 "is inconsistent with _old_marking_cycles_completed = %u",
2577 _old_marking_cycles_started, _old_marking_cycles_completed);
2578
2579 _old_marking_cycles_completed += 1;
2580
2581 // We need to clear the "in_progress" flag in the CM thread before
2582 // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
2583 // is set) so that if a waiter requests another System.gc() it doesn't
2584 // incorrectly see that a marking cycle is still in progress.
2585 if (concurrent) {
2586 _cmThread->set_idle();
2587 }
2588
2589 // This notify_all() will ensure that a thread that called
2590 // System.gc() (with ExplicitGCInvokesConcurrent set or not)
2591 // and is waiting for a full GC to finish will be woken up. It is
2592 // waiting in VM_G1IncCollectionPause::doit_epilogue().
2593 FullGCCount_lock->notify_all();
2594 }
2595
2596 void G1CollectedHeap::register_concurrent_cycle_start(const Ticks& start_time) {
2597 collector_state()->set_concurrent_cycle_started(true);
3107 public:
3108 YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {}
3109 void do_oop(oop* p) { if (_g1h->is_in_young(*p)) { _count++; } }
3110 void do_oop(narrowOop* p) { ShouldNotReachHere(); }
3111
3112 int count() { return _count; }
3113 void reset_count() { _count = 0; };
3114 };
3115
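// Verifies that a Klass whose embedded oops reference young-generation objects
// is marked as having modified oops (i.e. is dirty).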
3116 class VerifyKlassClosure: public KlassClosure {
3117 YoungRefCounterClosure _young_ref_counter_closure;
3118 OopClosure *_oop_closure;
3119 public:
3120 VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
3121 void do_klass(Klass* k) {
3122 k->oops_do(_oop_closure);
3123
3124 _young_ref_counter_closure.reset_count();
3125 k->oops_do(&_young_ref_counter_closure);
3126 if (_young_ref_counter_closure.count() > 0) {
3127 guarantee(k->has_modified_oops(), "Klass " PTR_FORMAT ", has young refs but is not dirty.", p2i(k));
3128 }
3129 }
3130 };
3131
3132 class VerifyLivenessOopClosure: public OopClosure {
3133 G1CollectedHeap* _g1h;
3134 VerifyOption _vo;
3135 public:
3136 VerifyLivenessOopClosure(G1CollectedHeap* g1h, VerifyOption vo):
3137 _g1h(g1h), _vo(vo)
3138 { }
3139 void do_oop(narrowOop *p) { do_oop_work(p); }
3140 void do_oop( oop *p) { do_oop_work(p); }
3141
3142 template <class T> void do_oop_work(T *p) {
3143 oop obj = oopDesc::load_decode_heap_oop(p);
3144 guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo),
3145 "Dead object referenced by a not dead object");
3146 }
3147 };
3177
3178 o->oop_iterate_no_header(&isLive);
3179 if (!_hr->obj_allocated_since_prev_marking(o)) {
3180 size_t obj_size = o->size(); // Make sure we don't overflow
3181 _live_bytes += (obj_size * HeapWordSize);
3182 }
3183 }
3184 }
3185 size_t live_bytes() { return _live_bytes; }
3186 };
3187
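// Verifies that every reference found in an archive object is either NULL or
// points into an archive range.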
3188 class VerifyArchiveOopClosure: public OopClosure {
3189 public:
3190 VerifyArchiveOopClosure(HeapRegion *hr) { }
3191 void do_oop(narrowOop *p) { do_oop_work(p); }
3192 void do_oop( oop *p) { do_oop_work(p); }
3193
3194 template <class T> void do_oop_work(T *p) {
3195 oop obj = oopDesc::load_decode_heap_oop(p);
3196 guarantee(obj == NULL || G1MarkSweep::in_archive_range(obj),
3197 "Archive object at " PTR_FORMAT " references a non-archive object at " PTR_FORMAT,
3198 p2i(p), p2i(obj));
3199 }
3200 };
3201
3202 class VerifyArchiveRegionClosure: public ObjectClosure {
3203 public:
3204 VerifyArchiveRegionClosure(HeapRegion *hr) { }
3205 // Verify that all object pointers are to archive regions.
3206 void do_object(oop o) {
3207 VerifyArchiveOopClosure checkOop(NULL);
3208 assert(o != NULL, "Should not be here for NULL oops");
3209 o->oop_iterate_no_header(&checkOop);
3210 }
3211 };
3212
3213 class VerifyRegionClosure: public HeapRegionClosure {
3214 private:
3215 bool _par;
3216 VerifyOption _vo;
3217 bool _failures;
3218 public:
4692
4693 public:
4694 G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
4695 AbstractGangTask("String/Symbol Unlinking"),
4696 _is_alive(is_alive),
4697 _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
4698 _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
4699
4700 _initial_string_table_size = StringTable::the_table()->table_size();
4701 _initial_symbol_table_size = SymbolTable::the_table()->table_size();
4702 if (process_strings) {
4703 StringTable::clear_parallel_claimed_index();
4704 }
4705 if (process_symbols) {
4706 SymbolTable::clear_parallel_claimed_index();
4707 }
4708 }
4709
4710 ~G1StringSymbolTableUnlinkTask() {
4711 guarantee(!_process_strings || StringTable::parallel_claimed_index() >= _initial_string_table_size,
4712 "claim value %d after unlink less than initial string table size %d",
4713 StringTable::parallel_claimed_index(), _initial_string_table_size);
4714 guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
4715 "claim value %d after unlink less than initial symbol table size %d",
4716 SymbolTable::parallel_claimed_index(), _initial_symbol_table_size);
4717
4718 if (G1TraceStringSymbolTableScrubbing) {
4719 gclog_or_tty->print_cr("Cleaned string and symbol table, "
4720 "strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, "
4721 "symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",
4722 strings_processed(), strings_removed(),
4723 symbols_processed(), symbols_removed());
4724 }
4725 }
4726
4727 void work(uint worker_id) {
4728 int strings_processed = 0;
4729 int strings_removed = 0;
4730 int symbols_processed = 0;
4731 int symbols_removed = 0;
4732 if (_process_strings) {
4733 StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
4734 Atomic::add(strings_processed, &_strings_processed);
4735 Atomic::add(strings_removed, &_strings_removed);
4736 }
5096 // Non Copying Keep Alive closure
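// References into the collection set are updated to the object's forwardee;
// references to humongous objects outside the collection set just record the
// humongous object as live.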
5097 class G1KeepAliveClosure: public OopClosure {
5098 G1CollectedHeap* _g1;
5099 public:
5100 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
5101 void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
5102 void do_oop(oop* p) {
5103 oop obj = *p;
5104 assert(obj != NULL, "the caller should have filtered out NULL values");
5105
5106 const InCSetState cset_state = _g1->in_cset_state(obj);
5107 if (!cset_state.is_in_cset_or_humongous()) {
5108 return;
5109 }
5110 if (cset_state.is_in_cset()) {
5111 assert( obj->is_forwarded(), "invariant" );
5112 *p = obj->forwardee();
5113 } else {
5114 assert(!obj->is_forwarded(), "invariant" );
5115 assert(cset_state.is_humongous(),
5116 "Only allowed InCSet state is IsHumongous, but is %d", cset_state.value());
5117 _g1->set_humongous_is_live(obj);
5118 }
5119 }
5120 };
5121
5122 // Copying Keep Alive closure - can be called from both
5123 // serial and parallel code as long as different worker
5124 // threads utilize different G1ParScanThreadState instances
5125 // and different queues.
5126
5127 class G1CopyingKeepAliveClosure: public OopClosure {
5128 G1CollectedHeap* _g1h;
5129 OopClosure* _copy_non_heap_obj_cl;
5130 G1ParScanThreadState* _par_scan_state;
5131
5132 public:
5133 G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
5134 OopClosure* non_heap_obj_cl,
5135 G1ParScanThreadState* pss):
5136 _g1h(g1h),
5150 // evacuation failure) then we need to update the reference
5151 // field and, if both reference and referent are in the G1
5152 // heap, update the RSet for the referent.
5153 //
5154 // If the referent has not been forwarded then we have to keep
5155 // it alive by policy. Therefore we have to copy the referent.
5156 //
5157 // If the reference field is in the G1 heap then we can push
5158 // on the PSS queue. When the queue is drained (after each
5159 // phase of reference processing) the object and its followers
5160 // will be copied, the reference field set to point to the
5161 // new location, and the RSet updated. Otherwise we need to
5162 // use the non-heap or metadata closures directly to copy
5163 // the referent object and update the pointer, while avoiding
5164 // updating the RSet.
5165
5166 if (_g1h->is_in_g1_reserved(p)) {
5167 _par_scan_state->push_on_queue(p);
5168 } else {
5169 assert(!Metaspace::contains((const void*)p),
5170 "Unexpectedly found a pointer from metadata: " PTR_FORMAT, p2i(p));
5171 _copy_non_heap_obj_cl->do_oop(p);
5172 }
5173 }
5174 }
5175 };
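// Illustrative sketch only (not part of the original source): because the
// closure holds no shared mutable state beyond its G1ParScanThreadState,
// parallel reference processing can simply give each worker its own instance:
//
//   G1ParScanThreadState* pss = per_worker_states[worker_id];   // hypothetical per-worker array
//   G1CopyingKeepAliveClosure keep_alive(g1h, &copy_non_heap_cl, pss);
//   // hand &keep_alive to the reference processor for this worker's phase
//
// The constraint stated above must hold: no two workers may share a
// G1ParScanThreadState or its queue.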
5176
5177 // Serial drain queue closure. Called as the 'complete_gc'
5178 // closure for each discovered list in some of the
5179 // reference processing phases.
5180
5181 class G1STWDrainQueueClosure: public VoidClosure {
5182 protected:
5183 G1CollectedHeap* _g1h;
5184 G1ParScanThreadState* _par_scan_state;
5185
5186 G1ParScanThreadState* par_scan_state() { return _par_scan_state; }
5187
5188 public:
5189 G1STWDrainQueueClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) :
5190 _g1h(g1h),
5806 ct_bs->verify_g1_young_region(mr);
5807 } else {
5808 ct_bs->verify_dirty_region(mr);
5809 }
5810 }
5811
5812 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
5813 G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
5814 for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
5815 verify_dirty_region(hr);
5816 }
5817 }
5818
5819 void G1CollectedHeap::verify_dirty_young_regions() {
5820 verify_dirty_young_list(_young_list->first_region());
5821 }
5822
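// Marking bitmap sanity check. Objects allocated at or above a region's TAMS
// (top-at-mark-start) during a marking cycle are treated as implicitly live
// and are never marked in the corresponding bitmap, so a set bit anywhere in
// [tams, end) indicates an inconsistency.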
5823 bool G1CollectedHeap::verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
5824 HeapWord* tams, HeapWord* end) {
5825 guarantee(tams <= end,
5826 "tams: " PTR_FORMAT " end: " PTR_FORMAT, p2i(tams), p2i(end));
5827 HeapWord* result = bitmap->getNextMarkedWordAddress(tams, end);
5828 if (result < end) {
5829 gclog_or_tty->cr();
5830 gclog_or_tty->print_cr("## wrong marked address on %s bitmap: " PTR_FORMAT,
5831 bitmap_name, p2i(result));
5832 gclog_or_tty->print_cr("## %s tams: " PTR_FORMAT " end: " PTR_FORMAT,
5833 bitmap_name, p2i(tams), p2i(end));
5834 return false;
5835 }
5836 return true;
5837 }
5838
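// verify_bitmaps() applies the check above to a region's marking bitmaps:
// the previous bitmap against the previous TAMS and, when the marking state
// allows it, the next bitmap against the next TAMS.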
5839 bool G1CollectedHeap::verify_bitmaps(const char* caller, HeapRegion* hr) {
5840 CMBitMapRO* prev_bitmap = concurrent_mark()->prevMarkBitMap();
5841 CMBitMapRO* next_bitmap = (CMBitMapRO*) concurrent_mark()->nextMarkBitMap();
5842
5843 HeapWord* bottom = hr->bottom();
5844 HeapWord* ptams = hr->prev_top_at_mark_start();
5845 HeapWord* ntams = hr->next_top_at_mark_start();
5846 HeapWord* end = hr->end();
6157 !r->rem_set()->is_empty()) {
6158
6159 if (G1TraceEagerReclaimHumongousObjects) {
6160 gclog_or_tty->print_cr("Live humongous region %u size " SIZE_FORMAT " start " PTR_FORMAT " length %u with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
6161 region_idx,
6162 (size_t)obj->size() * HeapWordSize,
6163 p2i(r->bottom()),
6164 r->region_num(),
6165 r->rem_set()->occupied(),
6166 r->rem_set()->strong_code_roots_list_length(),
6167 next_bitmap->isMarked(r->bottom()),
6168 g1h->is_humongous_reclaim_candidate(region_idx),
6169 obj->is_typeArray()
6170 );
6171 }
6172
6173 return false;
6174 }
6175
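// Only humongous objects nominated as eager-reclaim candidates at the start
// of the pause get this far, and candidates are limited to primitive type
// arrays, which cannot contain references to other objects.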
6176 guarantee(obj->is_typeArray(),
6177 "Only eagerly reclaiming type arrays is supported, but the object "
6178 PTR_FORMAT " is not.", p2i(r->bottom()));
6179
6180 if (G1TraceEagerReclaimHumongousObjects) {
6181 gclog_or_tty->print_cr("Dead humongous region %u size " SIZE_FORMAT " start " PTR_FORMAT " length %u with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
6182 region_idx,
6183 (size_t)obj->size() * HeapWordSize,
6184 p2i(r->bottom()),
6185 r->region_num(),
6186 r->rem_set()->occupied(),
6187 r->rem_set()->strong_code_roots_list_length(),
6188 next_bitmap->isMarked(r->bottom()),
6189 g1h->is_humongous_reclaim_candidate(region_idx),
6190 obj->is_typeArray()
6191 );
6192 }
6193 // Need to clear mark bit of the humongous object if already set.
6194 if (next_bitmap->isMarked(r->bottom())) {
6195 next_bitmap->clear(r->bottom());
6196 }
6197 _freed_bytes += r->used();
6198 r->set_containing_set(NULL);
6387 assert_at_safepoint(true /* should_be_vm_thread */);
6388
6389 if (!free_list_only) {
6390 TearDownRegionSetsClosure cl(&_old_set);
6391 heap_region_iterate(&cl);
6392
6393 // Note that emptying the _young_list is postponed and instead done as
6394 // the first step when rebuilding the region sets again. The reason for
6395 // this is that during a full GC string deduplication needs to know if
6396 // a collected region was young or old when the full GC was initiated.
6397 }
6398 _hrm.remove_all_free_regions();
6399 }
6400
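// Bookkeeping for _summary_bytes_used, the cached count of used bytes in the
// heap. rebuild_region_sets() below recomputes it from a full region walk and
// asserts that used_unlocked() agrees with recalculate_used().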
6401 void G1CollectedHeap::increase_used(size_t bytes) {
6402 _summary_bytes_used += bytes;
6403 }
6404
6405 void G1CollectedHeap::decrease_used(size_t bytes) {
6406 assert(_summary_bytes_used >= bytes,
6407 "invariant: _summary_bytes_used: " SIZE_FORMAT " should be >= bytes: " SIZE_FORMAT,
6408 _summary_bytes_used, bytes);
6409 _summary_bytes_used -= bytes;
6410 }
6411
6412 void G1CollectedHeap::set_used(size_t bytes) {
6413 _summary_bytes_used = bytes;
6414 }
6415
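// Closure used by rebuild_region_sets() below: re-populates the old set and
// the free list from a walk over all regions, accumulating the total used
// bytes as it goes. When _free_list_only is true only the free list is rebuilt.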
6416 class RebuildRegionSetsClosure : public HeapRegionClosure {
6417 private:
6418 bool _free_list_only;
6419 HeapRegionSet* _old_set;
6420 HeapRegionManager* _hrm;
6421 size_t _total_used;
6422
6423 public:
6424 RebuildRegionSetsClosure(bool free_list_only,
6425 HeapRegionSet* old_set, HeapRegionManager* hrm) :
6426 _free_list_only(free_list_only),
6427 _old_set(old_set), _hrm(hrm), _total_used(0) {
6428 assert(_hrm->num_free_regions() == 0, "pre-condition");
6470 }
6471 };
6472
6473 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
6474 assert_at_safepoint(true /* should_be_vm_thread */);
6475
6476 if (!free_list_only) {
6477 _young_list->empty_list();
6478 }
6479
6480 RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
6481 heap_region_iterate(&cl);
6482
6483 if (!free_list_only) {
6484 set_used(cl.total_used());
6485 if (_archive_allocator != NULL) {
6486 _archive_allocator->clear_used();
6487 }
6488 }
6489 assert(used_unlocked() == recalculate_used(),
6490 "inconsistent used_unlocked(), "
6491 "value: " SIZE_FORMAT " recalculated: " SIZE_FORMAT,
6492 used_unlocked(), recalculate_used());
6493 }
6494
6495 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
6496 _refine_cte_cl->set_concurrent(concurrent);
6497 }
6498
6499 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
6500 HeapRegion* hr = heap_region_containing(p);
6501 return hr->is_in(p);
6502 }
6503
6504 // Methods for the mutator alloc region
6505
6506 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
6507 bool force) {
6508 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6509 assert(!force || g1_policy()->can_expand_young_list(),
6510 "if force is true we should be able to expand the young list");
6511 bool young_list_full = g1_policy()->is_young_list_full();
6512 if (force || !young_list_full) {
6613
6614 public:
6615 HeapRegionSetCount _old_count;
6616 HeapRegionSetCount _humongous_count;
6617 HeapRegionSetCount _free_count;
6618
6619 VerifyRegionListsClosure(HeapRegionSet* old_set,
6620 HeapRegionSet* humongous_set,
6621 HeapRegionManager* hrm) :
6622 _old_set(old_set), _humongous_set(humongous_set), _hrm(hrm),
6623 _old_count(), _humongous_count(), _free_count(){ }
6624
6625 bool doHeapRegion(HeapRegion* hr) {
6626 if (hr->is_continues_humongous()) {
6627 return false;
6628 }
6629
6630 if (hr->is_young()) {
6631 // TODO
6632 } else if (hr->is_starts_humongous()) {
6633 assert(hr->containing_set() == _humongous_set, "Heap region %u is starts humongous but not in humongous set.", hr->hrm_index());
6634 _humongous_count.increment(1u, hr->capacity());
6635 } else if (hr->is_empty()) {
6636 assert(_hrm->is_free(hr), "Heap region %u is empty but not on the free list.", hr->hrm_index());
6637 _free_count.increment(1u, hr->capacity());
6638 } else if (hr->is_old()) {
6639 assert(hr->containing_set() == _old_set, "Heap region %u is old but not in the old set.", hr->hrm_index());
6640 _old_count.increment(1u, hr->capacity());
6641 } else {
6642 // There are no other valid region types. Check for the one invalid
6643 // case we can identify: a pinned region that is not in the old or humongous set.
6644 assert(!hr->is_pinned(), "Heap region %u is pinned but not old (archive) or humongous.", hr->hrm_index());
6645 ShouldNotReachHere();
6646 }
6647 return false;
6648 }
6649
6650 void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
6651 guarantee(old_set->length() == _old_count.length(), "Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length());
6652 guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), "Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
6653 old_set->total_capacity_bytes(), _old_count.capacity());
6654
6655 guarantee(humongous_set->length() == _humongous_count.length(), "Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count.length());
6656 guarantee(humongous_set->total_capacity_bytes() == _humongous_count.capacity(), "Hum set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
6657 humongous_set->total_capacity_bytes(), _humongous_count.capacity());
6658
6659 guarantee(free_list->num_free_regions() == _free_count.length(), "Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count.length());
6660 guarantee(free_list->total_capacity_bytes() == _free_count.capacity(), "Free list capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
6661 free_list->total_capacity_bytes(), _free_count.capacity());
6662 }
6663 };
6664
6665 void G1CollectedHeap::verify_region_sets() {
6666 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6667
6668 // First, check the explicit lists.
6669 _hrm.verify();
6670 {
6671 // Given that a concurrent operation might be adding regions to
6672 // the secondary free list we have to take the lock before
6673 // verifying it.
6674 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
6675 _secondary_free_list.verify_list();
6676 }
6677
6678 // If a concurrent region freeing operation is in progress it will
6679 // be difficult to correctly attribute any free regions we come
6680 // across to the correct free list given that they might belong to
6681 // one of several (free_list, secondary_free_list, any local lists,
6697 // Finally, make sure that the region accounting in the lists is
6698 // consistent with what we see in the heap.
6699
6700 VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_hrm);
6701 heap_region_iterate(&cl);
6702 cl.verify_counts(&_old_set, &_humongous_set, &_hrm);
6703 }
6704
6705 // Optimized nmethod scanning
6706
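// The closures below walk the oops embedded in an nmethod and add or remove
// that nmethod on the strong-code-roots list of every region one of those
// oops points into. Maintaining this per-region list lets evacuation scan
// only the nmethods that actually reference a given region rather than the
// entire code cache.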
6707 class RegisterNMethodOopClosure: public OopClosure {
6708 G1CollectedHeap* _g1h;
6709 nmethod* _nm;
6710
6711 template <class T> void do_oop_work(T* p) {
6712 T heap_oop = oopDesc::load_heap_oop(p);
6713 if (!oopDesc::is_null(heap_oop)) {
6714 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
6715 HeapRegion* hr = _g1h->heap_region_containing(obj);
6716 assert(!hr->is_continues_humongous(),
6717 "trying to add code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
6718 " starting at " HR_FORMAT,
6719 p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()));
6720
6721 // HeapRegion::add_strong_code_root_locked() avoids adding duplicate entries.
6722 hr->add_strong_code_root_locked(_nm);
6723 }
6724 }
6725
6726 public:
6727 RegisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
6728 _g1h(g1h), _nm(nm) {}
6729
6730 void do_oop(oop* p) { do_oop_work(p); }
6731 void do_oop(narrowOop* p) { do_oop_work(p); }
6732 };
6733
6734 class UnregisterNMethodOopClosure: public OopClosure {
6735 G1CollectedHeap* _g1h;
6736 nmethod* _nm;
6737
6738 template <class T> void do_oop_work(T* p) {
6739 T heap_oop = oopDesc::load_heap_oop(p);
6740 if (!oopDesc::is_null(heap_oop)) {
6741 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
6742 HeapRegion* hr = _g1h->heap_region_containing(obj);
6743 assert(!hr->is_continues_humongous(),
6744 "trying to remove code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
6745 " starting at " HR_FORMAT,
6746 p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()));
6747
6748 hr->remove_strong_code_root(_nm);
6749 }
6750 }
6751
6752 public:
6753 UnregisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
6754 _g1h(g1h), _nm(nm) {}
6755
6756 void do_oop(oop* p) { do_oop_work(p); }
6757 void do_oop(narrowOop* p) { do_oop_work(p); }
6758 };
6759
6760 void G1CollectedHeap::register_nmethod(nmethod* nm) {
6761 CollectedHeap::register_nmethod(nm);
6762
6763 guarantee(nm != NULL, "sanity");
6764 RegisterNMethodOopClosure reg_cl(this, nm);
6765 nm->oops_do(&reg_cl);
6766 }