303 // Index of last region in the series + 1.
304 uint last = first + num_regions;
305
306 // We need to initialize the region(s) we just discovered. This is
307 // a bit tricky given that it can happen concurrently with
308 // refinement threads refining cards on these regions and
309 // potentially wanting to refine the BOT as they are scanning
310 // those cards (this can happen shortly after a cleanup; see CR
311 // 6991377). So we have to set up the region(s) carefully and in
312 // a specific order.
313
314 // The word size sum of all the regions we will allocate.
315 size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
316 assert(word_size <= word_size_sum, "sanity");
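// A worked example (hedged; the actual region size is configurable): with
// 1M regions on a 64-bit VM, HeapRegion::GrainWords == 131072. A request of
// word_size == 200000 then needs num_regions == 2, so word_size_sum ==
// 262144 and the assert above holds (200000 <= 262144).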
317
318 // This will be the "starts humongous" region.
319 HeapRegion* first_hr = region_at(first);
320 // The header of the new object will be placed at the bottom of
321 // the first region.
322 HeapWord* new_obj = first_hr->bottom();
323 // This will be the new end of the first region in the series; it
324 // should also match the end of the last region in the series.
325 HeapWord* new_end = new_obj + word_size_sum;
326 // This will be the new top of the first region that will reflect
327 // this allocation.
328 HeapWord* new_top = new_obj + word_size;
329
330 // First, we need to zero the header of the space that we will be
331 // allocating. When we update top further down, some refinement
332 // threads might try to scan the region. By zeroing the header we
333 // ensure that any thread that will try to scan the region will
334 // come across the zero klass word and bail out.
335 //
336 // NOTE: It would not have been correct to have used
337 // CollectedHeap::fill_with_object() and make the space look like
338 // an int array. The thread that is doing the allocation will
339 // later update the object header to a potentially different array
340 // type and, for a very short period of time, the klass and length
341 // fields will be inconsistent. This could cause a refinement
342 // thread to calculate the object size incorrectly.
343 Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
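// Conceptually, a concurrent scanner protects itself with a check of this
// shape (a sketch, not the exact refinement code; cur is the candidate
// object address):
//
//   oop obj = oop(cur);
//   if (obj->klass_or_null() == NULL) {
//     return; // klass word not published yet, bail out
//   }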
344
345 // We will set up the first region as "starts humongous". This
346 // will also update the BOT covering all the regions to reflect
347 // that there is a single object that starts at the bottom of the
348 // first region.
349 first_hr->set_starts_humongous(new_top, new_end);
350 first_hr->set_allocation_context(context);
351 // Then, if there are any, we will set up the "continues
352 // humongous" regions.
353 HeapRegion* hr = NULL;
354 for (uint i = first + 1; i < last; ++i) {
355 hr = region_at(i);
356 hr->set_continues_humongous(first_hr);
357 hr->set_allocation_context(context);
358 }
359 // If we have "continues humongous" regions (hr != NULL), then the
360 // end of the last one should match new_end.
361 assert(hr == NULL || hr->end() == new_end, "sanity");
362
363 // Up to this point no concurrent thread would have been able to
364 // do any scanning on any region in this series. All the top
365 // fields still point to bottom, so the intersection between
366 // [bottom,top] and [card_start,card_end] will be empty. Before we
367 // update the top fields, we'll do a storestore to make sure that
368 // no thread sees the update to top before the zeroing of the
369 // object header and the BOT initialization.
370 OrderAccess::storestore();
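// A hedged sketch of the publication protocol above (the reader side is
// assumed to order its loads, e.g. via data dependency or a loadload):
//
//   writer (allocating thread)        reader (refinement thread)
//   --------------------------        --------------------------
//   zero klass word; init BOT         t = hr->top();
//   OrderAccess::storestore();        if (addr >= t) bail out;
//   hr->set_top(new_top);             k = obj->klass_or_null();
//
// A reader that observes the new top therefore also observes the zeroed
// klass word rather than stale memory.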
371
372 // Now that the BOT and the object header have been initialized,
373 // we can update top of the "starts humongous" region.
374 assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
375 "new_top should be in this region");
376 first_hr->set_top(new_top);
377 if (_hr_printer.is_active()) {
378 HeapWord* bottom = first_hr->bottom();
379 HeapWord* end = first_hr->orig_end();
380 if ((first + 1) == last) {
381 // the series has a single humongous region
382 _hr_printer.alloc(G1HRPrinter::SingleHumongous, first_hr, new_top);
383 } else {
384 // the series has more than one humongous region
385 _hr_printer.alloc(G1HRPrinter::StartsHumongous, first_hr, end);
386 }
387 }
388
389 // Now, we will update the top fields of the "continues humongous"
390 // regions. We need to do this because, otherwise, these regions
391 // would look empty and that would confuse parts of G1. For
392 // example, the code that looks for a run of consecutive empty
393 // regions would consider them empty and try to re-allocate them.
394 // We could extend is_empty() to also include
395 // !is_continues_humongous(), but it is easier to just update the top
396 // fields here. The way we set top for all regions (i.e., top ==
397 // end for all regions but the last one, top == new_top for the
398 // last one) is actually relied upon when we free up the humongous
399 // region in free_humongous_region().
400 hr = NULL;
401 for (uint i = first + 1; i < last; ++i) {
402 hr = region_at(i);
403 if ((i + 1) == last) {
404 // last continues humongous region
405 assert(hr->bottom() < new_top && new_top <= hr->end(),
406 "new_top should fall on this region");
407 hr->set_top(new_top);
408 _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, new_top);
409 } else {
410 // not last one
411 assert(new_top > hr->end(), "new_top should be above this region");
412 hr->set_top(hr->end());
413 _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->end());
414 }
415 }
416 // If we have continues humongous regions (hr != NULL), then the
417 // end of the last one should match new_end and its top should
418 // match new_top.
419 assert(hr == NULL ||
420 (hr->end() == new_end && hr->top() == new_top), "sanity");
421 check_bitmaps("Humongous Region Allocation", first_hr);
422
423 assert(first_hr->used() == word_size * HeapWordSize, "invariant");
424 increase_used(first_hr->used());
425 _humongous_set.add(first_hr);
426
427 return new_obj;
428 }
429
430 // If the allocation could fit into the free regions w/o expansion, try that first.
431 // Otherwise, if the heap can expand, do so.
432 // Otherwise, if using ex regions might help, try with ex regions given back.
433 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size, AllocationContext_t context) {
434 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
435
436 verify_region_sets_optional();
437
438 uint first = G1_NO_HRM_INDEX;
439 uint obj_regions = (uint)(align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords);
440
441 if (obj_regions == 1) {
442 // Only one region to allocate, try to use a fast path by directly allocating
443 // from the free lists. Do not try to expand here, we will potentially do that
444 // later.
445 HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
1122 HeapWord* result = humongous_obj_allocate(word_size, context);
1123 if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
1124 collector_state()->set_initiate_conc_mark_if_possible(true);
1125 }
1126 return result;
1127 }
1128
1129 ShouldNotReachHere();
1130 }
1131
1132 class PostMCRemSetClearClosure: public HeapRegionClosure {
1133 G1CollectedHeap* _g1h;
1134 ModRefBarrierSet* _mr_bs;
1135 public:
1136 PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
1137 _g1h(g1h), _mr_bs(mr_bs) {}
1138
1139 bool doHeapRegion(HeapRegion* r) {
1140 HeapRegionRemSet* hrrs = r->rem_set();
1141
1142 if (r->is_continues_humongous()) {
1143 // We'll assert that the strong code root list and RSet are empty
1144 assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
1145 assert(hrrs->occupied() == 0, "RSet should be empty");
1146 return false;
1147 }
1148
1149 _g1h->reset_gc_time_stamps(r);
1150 hrrs->clear();
1151 // You might think here that we could clear just the cards
1152 // corresponding to the used region. But no: if we leave a dirty card
1153 // in a region we might allocate into, then it would prevent that card
1154 // from being enqueued, and cause it to be missed.
1155 // Re: the performance cost: we shouldn't be doing full GC anyway!
1156 _mr_bs->clear(MemRegion(r->bottom(), r->end()));
1157
1158 return false;
1159 }
1160 };
1161
1162 void G1CollectedHeap::clear_rsets_post_compaction() {
1163 PostMCRemSetClearClosure rs_clear(this, g1_barrier_set());
1164 heap_region_iterate(&rs_clear);
1165 }
1166
1167 class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
1168 G1CollectedHeap* _g1h;
1169 UpdateRSOopClosure _cl;
1188
1189 public:
1190 ParRebuildRSTask(G1CollectedHeap* g1) :
1191 AbstractGangTask("ParRebuildRSTask"), _g1(g1), _hrclaimer(g1->workers()->active_workers()) {}
1192
1193 void work(uint worker_id) {
1194 RebuildRSOutOfRegionClosure rebuild_rs(_g1, worker_id);
1195 _g1->heap_region_par_iterate(&rebuild_rs, worker_id, &_hrclaimer);
1196 }
1197 };
1198
1199 class PostCompactionPrinterClosure: public HeapRegionClosure {
1200 private:
1201 G1HRPrinter* _hr_printer;
1202 public:
1203 bool doHeapRegion(HeapRegion* hr) {
1204 assert(!hr->is_young(), "not expecting to find young regions");
1205 if (hr->is_free()) {
1206 // We only generate output for non-empty regions.
1207 } else if (hr->is_starts_humongous()) {
1208 if (hr->region_num() == 1) {
1209 // single humongous region
1210 _hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
1211 } else {
1212 _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
1213 }
1214 } else if (hr->is_continues_humongous()) {
1215 _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
1216 } else if (hr->is_archive()) {
1217 _hr_printer->post_compaction(hr, G1HRPrinter::Archive);
1218 } else if (hr->is_old()) {
1219 _hr_printer->post_compaction(hr, G1HRPrinter::Old);
1220 } else {
1221 ShouldNotReachHere();
1222 }
1223 return false;
1224 }
1225
1226 PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
1227 : _hr_printer(hr_printer) { }
1228 };
1229
1230 void G1CollectedHeap::print_hrm_post_compaction() {
1231 PostCompactionPrinterClosure cl(hr_printer());
1232 heap_region_iterate(&cl);
1233 }
2205 (ParallelGCThreads > 1),
2206 // mt discovery
2207 ParallelGCThreads,
2208 // degree of mt discovery
2209 true,
2210 // Reference discovery is atomic
2211 &_is_alive_closure_stw);
2212 // is alive closure
2213 // (for efficiency/performance)
2214 }
2215
2216 CollectorPolicy* G1CollectedHeap::collector_policy() const {
2217 return g1_policy();
2218 }
2219
2220 size_t G1CollectedHeap::capacity() const {
2221 return _hrm.length() * HeapRegion::GrainBytes;
2222 }
2223
2224 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
2225 assert(!hr->is_continues_humongous(), "pre-condition");
2226 hr->reset_gc_time_stamp();
2227 if (hr->is_starts_humongous()) {
2228 uint first_index = hr->hrm_index() + 1;
2229 uint last_index = hr->last_hc_index();
2230 for (uint i = first_index; i < last_index; i += 1) {
2231 HeapRegion* chr = region_at(i);
2232 assert(chr->is_continues_humongous(), "sanity");
2233 chr->reset_gc_time_stamp();
2234 }
2235 }
2236 }
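// A hedged reading, based on the uses in this file: last_hc_index() yields
// the index one past the last "continues humongous" region of the series,
// so the half-open range [hrm_index() + 1, last_hc_index()) covers exactly
// the continuation regions, as in the loop above.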
2237
2238 #ifndef PRODUCT
2239
2240 class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
2241 private:
2242 unsigned _gc_time_stamp;
2243 bool _failures;
2244
2245 public:
2246 CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
2247 _gc_time_stamp(gc_time_stamp), _failures(false) { }
2248
2249 virtual bool doHeapRegion(HeapRegion* hr) {
2250 unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
2251 if (_gc_time_stamp != region_gc_time_stamp) {
2252 gclog_or_tty->print_cr("Region " HR_FORMAT " has GC time stamp = %d, "
2253 "expected %d", HR_FORMAT_PARAMS(hr),
2254 region_gc_time_stamp, _gc_time_stamp);
2255 _failures = true;
2283 }
2284
2285 // Computes the sum of the storage used by the various regions.
2286 size_t G1CollectedHeap::used() const {
2287 size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
2288 if (_archive_allocator != NULL) {
2289 result += _archive_allocator->used();
2290 }
2291 return result;
2292 }
2293
2294 size_t G1CollectedHeap::used_unlocked() const {
2295 return _summary_bytes_used;
2296 }
2297
2298 class SumUsedClosure: public HeapRegionClosure {
2299 size_t _used;
2300 public:
2301 SumUsedClosure() : _used(0) {}
2302 bool doHeapRegion(HeapRegion* r) {
2303 if (!r->is_continues_humongous()) {
2304 _used += r->used();
2305 }
2306 return false;
2307 }
2308 size_t result() { return _used; }
2309 };
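// A note on the closure protocol (hedged summary, inferred from the uses in
// this file): heap_region_iterate() invokes doHeapRegion() on every region
// and stops early as soon as a closure returns true. Closures such as
// SumUsedClosure therefore return false so that all regions are visited,
// and publish their result through an accessor (result()) afterwards.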
2310
2311 size_t G1CollectedHeap::recalculate_used() const {
2312 double recalculate_used_start = os::elapsedTime();
2313
2314 SumUsedClosure blk;
2315 heap_region_iterate(&blk);
2316
2317 g1_policy()->phase_times()->record_evac_fail_recalc_used_time((os::elapsedTime() - recalculate_used_start) * 1000.0);
2318 return blk.result();
2319 }
2320
2321 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
2322 switch (cause) {
2323 case GCCause::_gc_locker: return GCLockerInvokesConcurrent;
2324 case GCCause::_java_lang_system_gc: return ExplicitGCInvokesConcurrent;
2325 case GCCause::_dcmd_gc_run: return ExplicitGCInvokesConcurrent;
2506 // Schedule a standard evacuation pause. We're setting word_size
2507 // to 0 which means that we are not requesting a post-GC allocation.
2508 VM_G1IncCollectionPause op(gc_count_before,
2509 0, /* word_size */
2510 false, /* should_initiate_conc_mark */
2511 g1_policy()->max_pause_time_ms(),
2512 cause);
2513 VMThread::execute(&op);
2514 } else {
2515 // Schedule a Full GC.
2516 VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
2517 VMThread::execute(&op);
2518 }
2519 }
2520 } while (retry_gc);
2521 }
2522
2523 bool G1CollectedHeap::is_in(const void* p) const {
2524 if (_hrm.reserved().contains(p)) {
2525 // Given that we know that p is in the reserved space,
2526 // heap_region_containing_raw() should successfully
2527 // return the containing region.
2528 HeapRegion* hr = heap_region_containing_raw(p);
2529 return hr->is_in(p);
2530 } else {
2531 return false;
2532 }
2533 }
2534
2535 #ifdef ASSERT
2536 bool G1CollectedHeap::is_in_exact(const void* p) const {
2537 bool contains = reserved_region().contains(p);
2538 bool available = _hrm.is_available(addr_to_region((HeapWord*)p));
2539 if (contains && available) {
2540 return true;
2541 } else {
2542 return false;
2543 }
2544 }
2545 #endif
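// A hedged reading of the two predicates above: is_in() only requires that
// p falls inside the reserved heap range and its containing region, while
// is_in_exact() additionally requires the containing region to be committed
// (_hrm.is_available()), which is why it is confined to ASSERT builds.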
2546
2547 bool G1CollectedHeap::obj_in_cs(oop obj) {
2548 HeapRegion* r = _hrm.addr_to_region((HeapWord*) obj);
3045 _vo(vo),
3046 _failures(false) {}
3047
3048 bool failures() {
3049 return _failures;
3050 }
3051
3052 bool doHeapRegion(HeapRegion* r) {
3053 // For archive regions, verify there are no heap pointers to
3054 // non-pinned regions. For all others, verify liveness info.
3055 if (r->is_archive()) {
3056 VerifyArchiveRegionClosure verify_oop_pointers(r);
3057 r->object_iterate(&verify_oop_pointers);
3058 return true;
3059 }
3060 if (!r->is_continues_humongous()) {
3061 bool failures = false;
3062 r->verify(_vo, &failures);
3063 if (failures) {
3064 _failures = true;
3065 } else {
3066 VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
3067 r->object_iterate(&not_dead_yet_cl);
3068 if (_vo != VerifyOption_G1UseNextMarking) {
3069 if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
3070 gclog_or_tty->print_cr("[" PTR_FORMAT "," PTR_FORMAT "] "
3071 "max_live_bytes " SIZE_FORMAT " "
3072 "< calculated " SIZE_FORMAT,
3073 p2i(r->bottom()), p2i(r->end()),
3074 r->max_live_bytes(),
3075 not_dead_yet_cl.live_bytes());
3076 _failures = true;
3077 }
3078 } else {
3079 // When vo == UseNextMarking we cannot currently do a sanity
3080 // check on the live bytes as the calculation has not been
3081 // finalized yet.
3082 }
3083 }
3084 }
3085 return false; // continue the region iteration; failures are recorded in _failures
5299 assert(free_list != NULL, "pre-condition");
5300
5301 if (G1VerifyBitmaps) {
5302 MemRegion mr(hr->bottom(), hr->end());
5303 concurrent_mark()->clearRangePrevBitmap(mr);
5304 }
5305
5306 // Clear the card counts for this region.
5307 // Note: we only need to do this if the region is not young
5308 // (since we don't refine cards in young regions).
5309 if (!hr->is_young()) {
5310 _cg1r->hot_card_cache()->reset_card_counts(hr);
5311 }
5312 hr->hr_clear(par, true /* clear_space */, locked /* locked */);
5313 free_list->add_ordered(hr);
5314 }
5315
5316 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
5317 FreeRegionList* free_list,
5318 bool par) {
5319 assert(hr->is_starts_humongous(), "this is only for starts humongous regions");
5320 assert(free_list != NULL, "pre-condition");
5321
5322 size_t hr_capacity = hr->capacity();
5323 // We need to read this before we make the region non-humongous,
5324 // otherwise the information will be gone.
5325 uint last_index = hr->last_hc_index();
5326 hr->clear_humongous();
5327 free_region(hr, free_list, par);
5328
5329 uint i = hr->hrm_index() + 1;
5330 while (i < last_index) {
5331 HeapRegion* curr_hr = region_at(i);
5332 assert(curr_hr->is_continues_humongous(), "invariant");
5333 curr_hr->clear_humongous();
5334 free_region(curr_hr, free_list, par);
5335 i += 1;
5336 }
5337 }
5338
5339 void G1CollectedHeap::remove_from_old_sets(const HeapRegionSetCount& old_regions_removed,
5340 const HeapRegionSetCount& humongous_regions_removed) {
5341 if (old_regions_removed.length() > 0 || humongous_regions_removed.length() > 0) {
5342 MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
5343 _old_set.bulk_remove(old_regions_removed);
5344 _humongous_set.bulk_remove(humongous_regions_removed);
5345 }
5346
5347 }
5348
5349 void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
5350 assert(list != NULL, "list can't be null");
5351 if (!list->is_empty()) {
5352 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
5353 _hrm.insert_list_into_free_list(list);
5354 }
5355 }
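// A hedged note on locking: both helpers above take their locks with
// Mutex::_no_safepoint_check_flag because they can be called from inside a
// GC pause (including by parallel workers), where blocking on a safepoint
// check during lock acquisition would be incorrect.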
5356
5480
5481 void G1CollectedHeap::check_bitmaps(const char* caller, HeapRegion* hr) {
5482 if (!G1VerifyBitmaps) return;
5483
5484 guarantee(verify_bitmaps(caller, hr), "bitmap verification");
5485 }
5486
5487 class G1VerifyBitmapClosure : public HeapRegionClosure {
5488 private:
5489 const char* _caller;
5490 G1CollectedHeap* _g1h;
5491 bool _failures;
5492
5493 public:
5494 G1VerifyBitmapClosure(const char* caller, G1CollectedHeap* g1h) :
5495 _caller(caller), _g1h(g1h), _failures(false) { }
5496
5497 bool failures() { return _failures; }
5498
5499 virtual bool doHeapRegion(HeapRegion* hr) {
5500 if (hr->is_continues_humongous()) return false;
5501
5502 bool result = _g1h->verify_bitmaps(_caller, hr);
5503 if (!result) {
5504 _failures = true;
5505 }
5506 return false;
5507 }
5508 };
5509
5510 void G1CollectedHeap::check_bitmaps(const char* caller) {
5511 if (!G1VerifyBitmaps) return;
5512
5513 G1VerifyBitmapClosure cl(caller, this);
5514 heap_region_iterate(&cl);
5515 guarantee(!cl.failures(), "bitmap verification");
5516 }
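// Hedged usage note: both check_bitmaps() overloads are gated on the
// G1VerifyBitmaps flag, so in a normal run they return immediately; the
// full-heap variant iterates every region and only then raises a single
// guarantee if any region failed verification.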
5517
5518 class G1CheckCSetFastTableClosure : public HeapRegionClosure {
5519 private:
5520 bool _failures;
5521 public:
5755 //
5756 // It is not required to check whether the object has been found dead by marking
5757 // or not; in fact, doing so would prevent reclamation within a concurrent cycle, as
5758 // all objects allocated during that time are considered live.
5759 // SATB marking is even more conservative than the remembered set.
5760 // So if at this point in the collection there is no remembered set entry,
5761 // nobody has a reference to it.
5762 // At the start of collection we flush all refinement logs, and remembered sets
5763 // are completely up-to-date wrt references to the humongous object.
5764 //
5765 // Other implementation considerations:
5766 // - never consider object arrays at this time because cleaning up their
5767 // remembered sets would require considerable effort. Such cleanup is
5768 // required because stale remembered sets might reference locations that
5769 // are currently being allocated into.
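// Put differently, the test below keeps the region alive unless it was
// pre-selected as a reclaim candidate AND its remembered set is still empty
// at this point in the pause (a paraphrase of the code, not new policy):
//
//   bool is_live = !g1h->is_humongous_reclaim_candidate(region_idx) ||
//                  !r->rem_set()->is_empty();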
5770 uint region_idx = r->hrm_index();
5771 if (!g1h->is_humongous_reclaim_candidate(region_idx) ||
5772 !r->rem_set()->is_empty()) {
5773
5774 if (G1TraceEagerReclaimHumongousObjects) {
5775 gclog_or_tty->print_cr("Live humongous region %u size " SIZE_FORMAT " start " PTR_FORMAT " length %u with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
5776 region_idx,
5777 (size_t)obj->size() * HeapWordSize,
5778 p2i(r->bottom()),
5779 r->region_num(),
5780 r->rem_set()->occupied(),
5781 r->rem_set()->strong_code_roots_list_length(),
5782 next_bitmap->isMarked(r->bottom()),
5783 g1h->is_humongous_reclaim_candidate(region_idx),
5784 obj->is_typeArray()
5785 );
5786 }
5787
5788 return false;
5789 }
5790
5791 guarantee(obj->is_typeArray(),
5792 "Only eagerly reclaiming type arrays is supported, but the object "
5793 PTR_FORMAT " is not.", p2i(r->bottom()));
5794
5795 if (G1TraceEagerReclaimHumongousObjects) {
5796 gclog_or_tty->print_cr("Dead humongous region %u size " SIZE_FORMAT " start " PTR_FORMAT " length %u with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
5797 region_idx,
5798 (size_t)obj->size() * HeapWordSize,
5799 p2i(r->bottom()),
5800 r->region_num(),
5801 r->rem_set()->occupied(),
5802 r->rem_set()->strong_code_roots_list_length(),
5803 next_bitmap->isMarked(r->bottom()),
5804 g1h->is_humongous_reclaim_candidate(region_idx),
5805 obj->is_typeArray()
5806 );
5807 }
5808 // Need to clear mark bit of the humongous object if already set.
5809 if (next_bitmap->isMarked(r->bottom())) {
5810 next_bitmap->clear(r->bottom());
5811 }
5812 _freed_bytes += r->used();
5813 r->set_containing_set(NULL);
5814 _humongous_regions_removed.increment(1u, r->capacity());
5815 g1h->free_humongous_region(r, _free_region_list, false);
5816
5817 return false;
5818 }
5819
5820 HeapRegionSetCount& humongous_free_count() {
5821 return _humongous_regions_removed;
5822 }
5823
5824 size_t bytes_freed() const {
5825 return _freed_bytes;
5826 }
5827
5828 size_t humongous_reclaimed() const {
5829 return _humongous_regions_removed.length();
5830 }
5831 };
5832
5833 void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
5834 assert_at_safepoint(true);
5835
6030
6031 class RebuildRegionSetsClosure : public HeapRegionClosure {
6032 private:
6033 bool _free_list_only;
6034 HeapRegionSet* _old_set;
6035 HeapRegionManager* _hrm;
6036 size_t _total_used;
6037
6038 public:
6039 RebuildRegionSetsClosure(bool free_list_only,
6040 HeapRegionSet* old_set, HeapRegionManager* hrm) :
6041 _free_list_only(free_list_only),
6042 _old_set(old_set), _hrm(hrm), _total_used(0) {
6043 assert(_hrm->num_free_regions() == 0, "pre-condition");
6044 if (!free_list_only) {
6045 assert(_old_set->is_empty(), "pre-condition");
6046 }
6047 }
6048
6049 bool doHeapRegion(HeapRegion* r) {
6050 if (r->is_continues_humongous()) {
6051 return false;
6052 }
6053
6054 if (r->is_empty()) {
6055 // Add free regions to the free list
6056 r->set_free();
6057 r->set_allocation_context(AllocationContext::system());
6058 _hrm->insert_into_free_list(r);
6059 } else if (!_free_list_only) {
6060 assert(!r->is_young(), "we should not come across young regions");
6061
6062 if (r->is_humongous()) {
6063 // We ignore humongous regions; the humongous set is left unchanged.
6064 } else {
6065 // Objects that were compacted would have ended up on regions
6066 // that were previously old or free. Archive regions (which are
6067 // old) will not have been touched.
6068 assert(r->is_free() || r->is_old(), "invariant");
6069 // We now consider them old, so register as such. Leave
6070 // archive regions set that way, however, while still adding
6071 // them to the old set.
6072 if (!r->is_archive()) {
6073 r->set_old();
6221 // Heap region set verification
6222
6223 class VerifyRegionListsClosure : public HeapRegionClosure {
6224 private:
6225 HeapRegionSet* _old_set;
6226 HeapRegionSet* _humongous_set;
6227 HeapRegionManager* _hrm;
6228
6229 public:
6230 HeapRegionSetCount _old_count;
6231 HeapRegionSetCount _humongous_count;
6232 HeapRegionSetCount _free_count;
6233
6234 VerifyRegionListsClosure(HeapRegionSet* old_set,
6235 HeapRegionSet* humongous_set,
6236 HeapRegionManager* hrm) :
6237 _old_set(old_set), _humongous_set(humongous_set), _hrm(hrm),
6238 _old_count(), _humongous_count(), _free_count(){ }
6239
6240 bool doHeapRegion(HeapRegion* hr) {
6241 if (hr->is_continues_humongous()) {
6242 return false;
6243 }
6244
6245 if (hr->is_young()) {
6246 // TODO
6247 } else if (hr->is_starts_humongous()) {
6248 assert(hr->containing_set() == _humongous_set, "Heap region %u is starts humongous but not in humongous set.", hr->hrm_index());
6249 _humongous_count.increment(1u, hr->capacity());
6250 } else if (hr->is_empty()) {
6251 assert(_hrm->is_free(hr), "Heap region %u is empty but not on the free list.", hr->hrm_index());
6252 _free_count.increment(1u, hr->capacity());
6253 } else if (hr->is_old()) {
6254 assert(hr->containing_set() == _old_set, "Heap region %u is old but not in the old set.", hr->hrm_index());
6255 _old_count.increment(1u, hr->capacity());
6256 } else {
6257 // There are no other valid region types. Check for the one invalid
6258 // case we can identify: pinned without old or humongous set.
6259 assert(!hr->is_pinned(), "Heap region %u is pinned but not old (archive) or humongous.", hr->hrm_index());
6260 ShouldNotReachHere();
6261 }
6262 return false;
6263 }
6264
6265 void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
6266 guarantee(old_set->length() == _old_count.length(), "Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length());
6267 guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), "Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
6268 old_set->total_capacity_bytes(), _old_count.capacity());
303 // Index of last region in the series + 1.
304 uint last = first + num_regions;
305
306 // We need to initialize the region(s) we just discovered. This is
307 // a bit tricky given that it can happen concurrently with
308 // refinement threads refining cards on these regions and
309 // potentially wanting to refine the BOT as they are scanning
310 // those cards (this can happen shortly after a cleanup; see CR
311 // 6991377). So we have to set up the region(s) carefully and in
312 // a specific order.
313
314 // The word size sum of all the regions we will allocate.
315 size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
316 assert(word_size <= word_size_sum, "sanity");
317
318 // This will be the "starts humongous" region.
319 HeapRegion* first_hr = region_at(first);
320 // The header of the new object will be placed at the bottom of
321 // the first region.
322 HeapWord* new_obj = first_hr->bottom();
323 // This will be the top of the new object.
324 HeapWord* obj_top = new_obj + word_size;
325
326 // First, we need to zero the header of the space that we will be
327 // allocating. When we update top further down, some refinement
328 // threads might try to scan the region. By zeroing the header we
329 // ensure that any thread that will try to scan the region will
330 // come across the zero klass word and bail out.
331 //
332 // NOTE: It would not have been correct to have used
333 // CollectedHeap::fill_with_object() and make the space look like
334 // an int array. The thread that is doing the allocation will
335 // later update the object header to a potentially different array
336 // type and, for a very short period of time, the klass and length
337 // fields will be inconsistent. This could cause a refinement
338 // thread to calculate the object size incorrectly.
339 Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
340
341 // We will set up the first region as "starts humongous". This
342 // will also update the BOT covering all the regions to reflect
343 // that there is a single object that starts at the bottom of the
344 // first region.
345 first_hr->set_starts_humongous(obj_top);
346 first_hr->set_allocation_context(context);
347 // Then, if there are any, we will set up the "continues
348 // humongous" regions.
349 HeapRegion* hr = NULL;
350 for (uint i = first + 1; i < last; ++i) {
351 hr = region_at(i);
352 hr->set_continues_humongous(first_hr);
353 hr->set_allocation_context(context);
354 }
355
356 // Up to this point no concurrent thread would have been able to
357 // do any scanning on any region in this series. All the top
358 // fields still point to bottom, so the intersection between
359 // [bottom,top] and [card_start,card_end] will be empty. Before we
360 // update the top fields, we'll do a storestore to make sure that
361 // no thread sees the update to top before the zeroing of the
362 // object header and the BOT initialization.
363 OrderAccess::storestore();
364
365 // Now that the BOT and the object header have been initialized,
366 // we can update top of the "starts humongous" region.
367 first_hr->set_top(MIN2(first_hr->end(), obj_top));
368 if (_hr_printer.is_active()) {
369 _hr_printer.alloc(G1HRPrinter::StartsHumongous, first_hr, first_hr->end());
370 }
371
372 // Now, we will update the top fields of the "continues humongous"
373 // regions. We need to do this because, otherwise, these regions
374 // would look empty and that would confuse parts of G1. For
375 // example, the code that looks for a run of consecutive empty
376 // regions would consider them empty and try to re-allocate them.
377 // We could extend is_empty() to also include
378 // !is_continues_humongous(), but it is easier to just update the top
379 // fields here. The way we set top for all regions (i.e., top ==
380 // end for all regions but the last one, top == obj_top for the
381 // last one) is actually relied upon when we free up the humongous
382 // region in free_humongous_region().
383 hr = NULL;
384 for (uint i = first + 1; i < last; ++i) {
385 hr = region_at(i);
386 if ((i + 1) == last) {
387 // last continues humongous region
388 assert(hr->bottom() < obj_top && obj_top <= hr->end(),
389 "new_top should fall on this region");
390 hr->set_top(obj_top);
391 _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, obj_top);
392 } else {
393 // not last one
394 assert(obj_top > hr->end(), "obj_top should be above this region");
395 hr->set_top(hr->end());
396 _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->end());
397 }
398 }
399 // If we have "continues humongous" regions (hr != NULL), the top of
400 // the last one should match obj_top.
401 assert(hr == NULL || (hr->top() == obj_top), "sanity");
402 check_bitmaps("Humongous Region Allocation", first_hr);
403
404 increase_used(word_size * HeapWordSize);
405
406 for (uint i = first; i < last; ++i) {
407 _humongous_set.add(region_at(i));
408 }
409
410 return new_obj;
411 }
412
413 // If the allocation could fit into the free regions w/o expansion, try that first.
414 // Otherwise, if the heap can expand, do so.
415 // Otherwise, if using ex regions might help, try with ex regions given back.
416 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size, AllocationContext_t context) {
417 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
418
419 verify_region_sets_optional();
420
421 uint first = G1_NO_HRM_INDEX;
422 uint obj_regions = (uint)(align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords);
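// Hedged aside: align_size_up_(x, a) rounds x up to a multiple of the
// power-of-two alignment a, so the line above is a ceiling division:
// obj_regions == ceil(word_size / HeapRegion::GrainWords).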
423
424 if (obj_regions == 1) {
425 // Only one region to allocate, try to use a fast path by directly allocating
426 // from the free lists. Do not try to expand here, we will potentially do that
427 // later.
428 HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
1105 HeapWord* result = humongous_obj_allocate(word_size, context);
1106 if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
1107 collector_state()->set_initiate_conc_mark_if_possible(true);
1108 }
1109 return result;
1110 }
1111
1112 ShouldNotReachHere();
1113 }
1114
1115 class PostMCRemSetClearClosure: public HeapRegionClosure {
1116 G1CollectedHeap* _g1h;
1117 ModRefBarrierSet* _mr_bs;
1118 public:
1119 PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
1120 _g1h(g1h), _mr_bs(mr_bs) {}
1121
1122 bool doHeapRegion(HeapRegion* r) {
1123 HeapRegionRemSet* hrrs = r->rem_set();
1124
1125 _g1h->reset_gc_time_stamps(r);
1126
1127 if (r->is_continues_humongous()) {
1129 // We'll assert that the strong code root list and RSet are empty
1129 assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
1130 assert(hrrs->occupied() == 0, "RSet should be empty");
1131 return false;
1132 }
1133
1134 hrrs->clear();
1135 // You might think here that we could clear just the cards
1136 // corresponding to the used region. But no: if we leave a dirty card
1137 // in a region we might allocate into, then it would prevent that card
1138 // from being enqueued, and cause it to be missed.
1139 // Re: the performance cost: we shouldn't be doing full GC anyway!
1140 _mr_bs->clear(MemRegion(r->bottom(), r->end()));
1141
1142 return false;
1143 }
1144 };
1145
1146 void G1CollectedHeap::clear_rsets_post_compaction() {
1147 PostMCRemSetClearClosure rs_clear(this, g1_barrier_set());
1148 heap_region_iterate(&rs_clear);
1149 }
1150
1151 class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
1152 G1CollectedHeap* _g1h;
1153 UpdateRSOopClosure _cl;
1172
1173 public:
1174 ParRebuildRSTask(G1CollectedHeap* g1) :
1175 AbstractGangTask("ParRebuildRSTask"), _g1(g1), _hrclaimer(g1->workers()->active_workers()) {}
1176
1177 void work(uint worker_id) {
1178 RebuildRSOutOfRegionClosure rebuild_rs(_g1, worker_id);
1179 _g1->heap_region_par_iterate(&rebuild_rs, worker_id, &_hrclaimer);
1180 }
1181 };
1182
1183 class PostCompactionPrinterClosure: public HeapRegionClosure {
1184 private:
1185 G1HRPrinter* _hr_printer;
1186 public:
1187 bool doHeapRegion(HeapRegion* hr) {
1188 assert(!hr->is_young(), "not expecting to find young regions");
1189 if (hr->is_free()) {
1190 // We only generate output for non-empty regions.
1191 } else if (hr->is_starts_humongous()) {
1192 _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
1193 } else if (hr->is_continues_humongous()) {
1194 _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
1195 } else if (hr->is_archive()) {
1196 _hr_printer->post_compaction(hr, G1HRPrinter::Archive);
1197 } else if (hr->is_old()) {
1198 _hr_printer->post_compaction(hr, G1HRPrinter::Old);
1199 } else {
1200 ShouldNotReachHere();
1201 }
1202 return false;
1203 }
1204
1205 PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
1206 : _hr_printer(hr_printer) { }
1207 };
1208
1209 void G1CollectedHeap::print_hrm_post_compaction() {
1210 PostCompactionPrinterClosure cl(hr_printer());
1211 heap_region_iterate(&cl);
1212 }
2184 (ParallelGCThreads > 1),
2185 // mt discovery
2186 ParallelGCThreads,
2187 // degree of mt discovery
2188 true,
2189 // Reference discovery is atomic
2190 &_is_alive_closure_stw);
2191 // is alive closure
2192 // (for efficiency/performance)
2193 }
2194
2195 CollectorPolicy* G1CollectedHeap::collector_policy() const {
2196 return g1_policy();
2197 }
2198
2199 size_t G1CollectedHeap::capacity() const {
2200 return _hrm.length() * HeapRegion::GrainBytes;
2201 }
2202
2203 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
2204 hr->reset_gc_time_stamp();
2205 }
2206
2207 #ifndef PRODUCT
2208
2209 class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
2210 private:
2211 unsigned _gc_time_stamp;
2212 bool _failures;
2213
2214 public:
2215 CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
2216 _gc_time_stamp(gc_time_stamp), _failures(false) { }
2217
2218 virtual bool doHeapRegion(HeapRegion* hr) {
2219 unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
2220 if (_gc_time_stamp != region_gc_time_stamp) {
2221 gclog_or_tty->print_cr("Region " HR_FORMAT " has GC time stamp = %d, "
2222 "expected %d", HR_FORMAT_PARAMS(hr),
2223 region_gc_time_stamp, _gc_time_stamp);
2224 _failures = true;
2252 }
2253
2254 // Computes the sum of the storage used by the various regions.
2255 size_t G1CollectedHeap::used() const {
2256 size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
2257 if (_archive_allocator != NULL) {
2258 result += _archive_allocator->used();
2259 }
2260 return result;
2261 }
2262
2263 size_t G1CollectedHeap::used_unlocked() const {
2264 return _summary_bytes_used;
2265 }
2266
2267 class SumUsedClosure: public HeapRegionClosure {
2268 size_t _used;
2269 public:
2270 SumUsedClosure() : _used(0) {}
2271 bool doHeapRegion(HeapRegion* r) {
2272 _used += r->used();
2273 return false;
2274 }
2275 size_t result() { return _used; }
2276 };
2277
2278 size_t G1CollectedHeap::recalculate_used() const {
2279 double recalculate_used_start = os::elapsedTime();
2280
2281 SumUsedClosure blk;
2282 heap_region_iterate(&blk);
2283
2284 g1_policy()->phase_times()->record_evac_fail_recalc_used_time((os::elapsedTime() - recalculate_used_start) * 1000.0);
2285 return blk.result();
2286 }
2287
2288 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
2289 switch (cause) {
2290 case GCCause::_gc_locker: return GCLockerInvokesConcurrent;
2291 case GCCause::_java_lang_system_gc: return ExplicitGCInvokesConcurrent;
2292 case GCCause::_dcmd_gc_run: return ExplicitGCInvokesConcurrent;
2473 // Schedule a standard evacuation pause. We're setting word_size
2474 // to 0 which means that we are not requesting a post-GC allocation.
2475 VM_G1IncCollectionPause op(gc_count_before,
2476 0, /* word_size */
2477 false, /* should_initiate_conc_mark */
2478 g1_policy()->max_pause_time_ms(),
2479 cause);
2480 VMThread::execute(&op);
2481 } else {
2482 // Schedule a Full GC.
2483 VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
2484 VMThread::execute(&op);
2485 }
2486 }
2487 } while (retry_gc);
2488 }
2489
2490 bool G1CollectedHeap::is_in(const void* p) const {
2491 if (_hrm.reserved().contains(p)) {
2492 // Given that we know that p is in the reserved space,
2493 // heap_region_containing() should successfully
2494 // return the containing region.
2495 HeapRegion* hr = heap_region_containing(p);
2496 return hr->is_in(p);
2497 } else {
2498 return false;
2499 }
2500 }
2501
2502 #ifdef ASSERT
2503 bool G1CollectedHeap::is_in_exact(const void* p) const {
2504 bool contains = reserved_region().contains(p);
2505 bool available = _hrm.is_available(addr_to_region((HeapWord*)p));
2506 if (contains && available) {
2507 return true;
2508 } else {
2509 return false;
2510 }
2511 }
2512 #endif
2513
2514 bool G1CollectedHeap::obj_in_cs(oop obj) {
2515 HeapRegion* r = _hrm.addr_to_region((HeapWord*) obj);
3012 _vo(vo),
3013 _failures(false) {}
3014
3015 bool failures() {
3016 return _failures;
3017 }
3018
3019 bool doHeapRegion(HeapRegion* r) {
3020 // For archive regions, verify there are no heap pointers to
3021 // non-pinned regions. For all others, verify liveness info.
3022 if (r->is_archive()) {
3023 VerifyArchiveRegionClosure verify_oop_pointers(r);
3024 r->object_iterate(&verify_oop_pointers);
3025 return true;
3026 }
3027 if (!r->is_continues_humongous()) {
3028 bool failures = false;
3029 r->verify(_vo, &failures);
3030 if (failures) {
3031 _failures = true;
3032 } else if (!r->is_starts_humongous()) {
3033 VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
3034 r->object_iterate(&not_dead_yet_cl);
3035 if (_vo != VerifyOption_G1UseNextMarking) {
3036 if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
3037 gclog_or_tty->print_cr("[" PTR_FORMAT "," PTR_FORMAT "] "
3038 "max_live_bytes " SIZE_FORMAT " "
3039 "< calculated " SIZE_FORMAT,
3040 p2i(r->bottom()), p2i(r->end()),
3041 r->max_live_bytes(),
3042 not_dead_yet_cl.live_bytes());
3043 _failures = true;
3044 }
3045 } else {
3046 // When vo == UseNextMarking we cannot currently do a sanity
3047 // check on the live bytes as the calculation has not been
3048 // finalized yet.
3049 }
3050 }
3051 }
3052 return false; // continue the region iteration; failures are recorded in _failures
5266 assert(free_list != NULL, "pre-condition");
5267
5268 if (G1VerifyBitmaps) {
5269 MemRegion mr(hr->bottom(), hr->end());
5270 concurrent_mark()->clearRangePrevBitmap(mr);
5271 }
5272
5273 // Clear the card counts for this region.
5274 // Note: we only need to do this if the region is not young
5275 // (since we don't refine cards in young regions).
5276 if (!hr->is_young()) {
5277 _cg1r->hot_card_cache()->reset_card_counts(hr);
5278 }
5279 hr->hr_clear(par, true /* clear_space */, locked /* locked */);
5280 free_list->add_ordered(hr);
5281 }
5282
5283 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
5284 FreeRegionList* free_list,
5285 bool par) {
5286 assert(hr->is_humongous(), "this is only for humongous regions");
5287 assert(free_list != NULL, "pre-condition");
5288 hr->clear_humongous();
5289 free_region(hr, free_list, par);
5290 }
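// Usage sketch (hedged): with this signature the caller walks the series
// itself, using the next_humongous_region() helper the eager reclaim code
// later in this file relies on:
//
//   do {
//     HeapRegion* next = g1h->next_humongous_region(r);
//     g1h->free_humongous_region(r, free_list, false /* par */);
//     r = next;
//   } while (r != NULL);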
5291
5292 void G1CollectedHeap::remove_from_old_sets(const HeapRegionSetCount& old_regions_removed,
5293 const HeapRegionSetCount& humongous_regions_removed) {
5294 if (old_regions_removed.length() > 0 || humongous_regions_removed.length() > 0) {
5295 MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
5296 _old_set.bulk_remove(old_regions_removed);
5297 _humongous_set.bulk_remove(humongous_regions_removed);
5298 }
5299
5300 }
5301
5302 void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
5303 assert(list != NULL, "list can't be null");
5304 if (!list->is_empty()) {
5305 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
5306 _hrm.insert_list_into_free_list(list);
5307 }
5308 }
5309
5433
5434 void G1CollectedHeap::check_bitmaps(const char* caller, HeapRegion* hr) {
5435 if (!G1VerifyBitmaps) return;
5436
5437 guarantee(verify_bitmaps(caller, hr), "bitmap verification");
5438 }
5439
5440 class G1VerifyBitmapClosure : public HeapRegionClosure {
5441 private:
5442 const char* _caller;
5443 G1CollectedHeap* _g1h;
5444 bool _failures;
5445
5446 public:
5447 G1VerifyBitmapClosure(const char* caller, G1CollectedHeap* g1h) :
5448 _caller(caller), _g1h(g1h), _failures(false) { }
5449
5450 bool failures() { return _failures; }
5451
5452 virtual bool doHeapRegion(HeapRegion* hr) {
5453 bool result = _g1h->verify_bitmaps(_caller, hr);
5454 if (!result) {
5455 _failures = true;
5456 }
5457 return false;
5458 }
5459 };
5460
5461 void G1CollectedHeap::check_bitmaps(const char* caller) {
5462 if (!G1VerifyBitmaps) return;
5463
5464 G1VerifyBitmapClosure cl(caller, this);
5465 heap_region_iterate(&cl);
5466 guarantee(!cl.failures(), "bitmap verification");
5467 }
5468
5469 class G1CheckCSetFastTableClosure : public HeapRegionClosure {
5470 private:
5471 bool _failures;
5472 public:
5706 //
5707 // It is not required to check whether the object has been found dead by marking
5708 // or not; in fact, doing so would prevent reclamation within a concurrent cycle, as
5709 // all objects allocated during that time are considered live.
5710 // SATB marking is even more conservative than the remembered set.
5711 // So if at this point in the collection there is no remembered set entry,
5712 // nobody has a reference to it.
5713 // At the start of collection we flush all refinement logs, and remembered sets
5714 // are completely up-to-date wrt references to the humongous object.
5715 //
5716 // Other implementation considerations:
5717 // - never consider object arrays at this time because cleaning up their
5718 // remembered sets would require considerable effort. Such cleanup is
5719 // required because stale remembered sets might reference locations that
5720 // are currently being allocated into.
5721 uint region_idx = r->hrm_index();
5722 if (!g1h->is_humongous_reclaim_candidate(region_idx) ||
5723 !r->rem_set()->is_empty()) {
5724
5725 if (G1TraceEagerReclaimHumongousObjects) {
5726 gclog_or_tty->print_cr("Live humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
5727 region_idx,
5728 (size_t)obj->size() * HeapWordSize,
5729 p2i(r->bottom()),
5730 r->rem_set()->occupied(),
5731 r->rem_set()->strong_code_roots_list_length(),
5732 next_bitmap->isMarked(r->bottom()),
5733 g1h->is_humongous_reclaim_candidate(region_idx),
5734 obj->is_typeArray()
5735 );
5736 }
5737
5738 return false;
5739 }
5740
5741 guarantee(obj->is_typeArray(),
5742 "Only eagerly reclaiming type arrays is supported, but the object "
5743 PTR_FORMAT " is not.", p2i(r->bottom()));
5744
5745 if (G1TraceEagerReclaimHumongousObjects) {
5746 gclog_or_tty->print_cr("Dead humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
5747 region_idx,
5748 (size_t)obj->size() * HeapWordSize,
5749 p2i(r->bottom()),
5750 r->rem_set()->occupied(),
5751 r->rem_set()->strong_code_roots_list_length(),
5752 next_bitmap->isMarked(r->bottom()),
5753 g1h->is_humongous_reclaim_candidate(region_idx),
5754 obj->is_typeArray()
5755 );
5756 }
5757 // Need to clear mark bit of the humongous object if already set.
5758 if (next_bitmap->isMarked(r->bottom())) {
5759 next_bitmap->clear(r->bottom());
5760 }
5761 do {
5762 HeapRegion* next = g1h->next_humongous_region(r);
5763 _freed_bytes += r->used();
5764 r->set_containing_set(NULL);
5765 _humongous_regions_removed.increment(1u, r->capacity());
5766 g1h->free_humongous_region(r, _free_region_list, false);
5767 r = next;
5768 } while (r != NULL);
5769
5770 return false;
5771 }
5772
5773 HeapRegionSetCount& humongous_free_count() {
5774 return _humongous_regions_removed;
5775 }
5776
5777 size_t bytes_freed() const {
5778 return _freed_bytes;
5779 }
5780
5781 size_t humongous_reclaimed() const {
5782 return _humongous_regions_removed.length();
5783 }
5784 };
5785
5786 void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
5787 assert_at_safepoint(true);
5788
5983
5984 class RebuildRegionSetsClosure : public HeapRegionClosure {
5985 private:
5986 bool _free_list_only;
5987 HeapRegionSet* _old_set;
5988 HeapRegionManager* _hrm;
5989 size_t _total_used;
5990
5991 public:
5992 RebuildRegionSetsClosure(bool free_list_only,
5993 HeapRegionSet* old_set, HeapRegionManager* hrm) :
5994 _free_list_only(free_list_only),
5995 _old_set(old_set), _hrm(hrm), _total_used(0) {
5996 assert(_hrm->num_free_regions() == 0, "pre-condition");
5997 if (!free_list_only) {
5998 assert(_old_set->is_empty(), "pre-condition");
5999 }
6000 }
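// Hedged summary of the two modes: with free_list_only == true, only the
// free list is rebuilt and the old/humongous sets are assumed to still be
// valid; with free_list_only == false, the old set is rebuilt from scratch
// as well, which is why the constructor insists that it starts out empty.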
6001
6002 bool doHeapRegion(HeapRegion* r) {
6003 if (r->is_empty()) {
6004 // Add free regions to the free list
6005 r->set_free();
6006 r->set_allocation_context(AllocationContext::system());
6007 _hrm->insert_into_free_list(r);
6008 } else if (!_free_list_only) {
6009 assert(!r->is_young(), "we should not come across young regions");
6010
6011 if (r->is_humongous()) {
6012 // We ignore humongous regions; the humongous set is left unchanged.
6013 } else {
6014 // Objects that were compacted would have ended up on regions
6015 // that were previously old or free. Archive regions (which are
6016 // old) will not have been touched.
6017 assert(r->is_free() || r->is_old(), "invariant");
6018 // We now consider them old, so register as such. Leave
6019 // archive regions set that way, however, while still adding
6020 // them to the old set.
6021 if (!r->is_archive()) {
6022 r->set_old();
6170 // Heap region set verification
6171
6172 class VerifyRegionListsClosure : public HeapRegionClosure {
6173 private:
6174 HeapRegionSet* _old_set;
6175 HeapRegionSet* _humongous_set;
6176 HeapRegionManager* _hrm;
6177
6178 public:
6179 HeapRegionSetCount _old_count;
6180 HeapRegionSetCount _humongous_count;
6181 HeapRegionSetCount _free_count;
6182
6183 VerifyRegionListsClosure(HeapRegionSet* old_set,
6184 HeapRegionSet* humongous_set,
6185 HeapRegionManager* hrm) :
6186 _old_set(old_set), _humongous_set(humongous_set), _hrm(hrm),
6187 _old_count(), _humongous_count(), _free_count(){ }
6188
6189 bool doHeapRegion(HeapRegion* hr) {
6190 if (hr->is_young()) {
6191 // TODO
6192 } else if (hr->is_humongous()) {
6193 assert(hr->containing_set() == _humongous_set, "Heap region %u is humongous but not in humongous set.", hr->hrm_index());
6194 _humongous_count.increment(1u, hr->capacity());
6195 } else if (hr->is_empty()) {
6196 assert(_hrm->is_free(hr), "Heap region %u is empty but not on the free list.", hr->hrm_index());
6197 _free_count.increment(1u, hr->capacity());
6198 } else if (hr->is_old()) {
6199 assert(hr->containing_set() == _old_set, "Heap region %u is old but not in the old set.", hr->hrm_index());
6200 _old_count.increment(1u, hr->capacity());
6201 } else {
6202 // There are no other valid region types. Check for the one invalid
6203 // case we can identify: pinned without old or humongous set.
6204 assert(!hr->is_pinned(), "Heap region %u is pinned but not old (archive) or humongous.", hr->hrm_index());
6205 ShouldNotReachHere();
6206 }
6207 return false;
6208 }
6209
6210 void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
6211 guarantee(old_set->length() == _old_count.length(), "Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length());
6212 guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), "Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
6213 old_set->total_capacity_bytes(), _old_count.capacity());
|