src/share/vm/gc/g1/g1CollectedHeap.cpp

 179   _length = 0;
 180 
 181   empty_list(_survivor_head);
 182   _survivor_head = NULL;
 183   _survivor_tail = NULL;
 184   _survivor_length = 0;
 185 
 186   _last_sampled_rs_lengths = 0;
 187 
 188   assert(check_list_empty(false), "just making sure...");
 189 }
 190 
 191 bool YoungList::check_list_well_formed() {
 192   bool ret = true;
 193 
 194   uint length = 0;
 195   HeapRegion* curr = _head;
 196   HeapRegion* last = NULL;
 197   while (curr != NULL) {
 198     if (!curr->is_young()) {
 199     gclog_or_tty->print_cr("### YOUNG REGION " PTR_FORMAT "-" PTR_FORMAT " "
 200                              "incorrectly tagged (y: %d, surv: %d)",
 201                              p2i(curr->bottom()), p2i(curr->end()),
 202                              curr->is_young(), curr->is_survivor());
 203       ret = false;
 204     }
 205     ++length;
 206     last = curr;
 207     curr = curr->get_next_young_region();
 208   }
 209   ret = ret && (length == _length);
 210 
 211   if (!ret) {
 212     gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
 213     gclog_or_tty->print_cr("###   list has %u entries, _length is %u",
 214                            length, _length);
 215   }
 216 
 217   return ret;
 218 }
 219 


 308   // Don't clear the survivor list handles until the start of
 309   // the next evacuation pause - we need them in order to re-tag
 310   // the survivor regions from this evacuation pause as 'young'
 311   // at the start of the next.
 312 
 313   _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);
 314 
 315   assert(check_list_well_formed(), "young list should be well formed");
 316 }
 317 
 318 void YoungList::print() {
 319   HeapRegion* lists[] = {_head,   _survivor_head};
 320   const char* names[] = {"YOUNG", "SURVIVOR"};
 321 
 322   for (uint list = 0; list < ARRAY_SIZE(lists); ++list) {
 323     gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
 324     HeapRegion *curr = lists[list];
 325     if (curr == NULL)
 326       gclog_or_tty->print_cr("  empty");
 327     while (curr != NULL) {
 328       gclog_or_tty->print_cr("  " HR_FORMAT ", P: " PTR_FORMAT ", N: " PTR_FORMAT ", age: %4d",
 329                              HR_FORMAT_PARAMS(curr),
 330                              p2i(curr->prev_top_at_mark_start()),
 331                              p2i(curr->next_top_at_mark_start()),
 332                              curr->age_in_surv_rate_group_cond());
 333       curr = curr->get_next_young_region();
 334     }
 335   }
 336 
 337   gclog_or_tty->cr();
 338 }
 339 
 340 void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
 341   HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
 342 }
 343 
 344 void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
 345   // The from card cache is not the memory that is actually committed. So we cannot
 346   // take advantage of the zero_filled parameter.
 347   reset_from_card_cache(start_idx, num_regions);
 348 }


 412 HeapRegion*
 413 G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
 414   MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
 415   while (!_secondary_free_list.is_empty() || free_regions_coming()) {
 416     if (!_secondary_free_list.is_empty()) {
 417       if (G1ConcRegionFreeingVerbose) {
 418         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 419                                "secondary_free_list has %u entries",
 420                                _secondary_free_list.length());
 421       }
 422       // It looks as if there are free regions available on the
 423       // secondary_free_list. Let's move them to the free_list and try
 424       // again to allocate from it.
 425       append_secondary_free_list();
 426 
 427       assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not "
 428              "empty we should have moved at least one entry to the free_list");
 429       HeapRegion* res = _hrm.allocate_free_region(is_old);
 430       if (G1ConcRegionFreeingVerbose) {
 431         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 432                                "allocated " HR_FORMAT " from secondary_free_list",
 433                                HR_FORMAT_PARAMS(res));
 434       }
 435       return res;
 436     }
 437 
 438     // Wait here until we get notified either when (a) there are no
 439     // more free regions coming or (b) some regions have been moved on
 440     // the secondary_free_list.
 441     SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
 442   }
 443 
 444   if (G1ConcRegionFreeingVerbose) {
 445     gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 446                            "could not allocate from secondary_free_list");
 447   }
 448   return NULL;
 449 }
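
The loop above is a textbook monitor wait: the allocating thread blocks on SecondaryFreeList_lock until the concurrent cleanup thread either appends regions to the secondary free list or announces that no more free regions are coming. A minimal standalone sketch of that protocol, using std::condition_variable as a stand-in for the HotSpot Mutex (every name below is illustrative, not HotSpot API):

#include <condition_variable>
#include <deque>
#include <mutex>

// Stand-ins for _secondary_free_list and free_regions_coming().
static std::mutex              list_lock;
static std::condition_variable cv;
static std::deque<int>         secondary_free_list;
static bool                    free_regions_coming = true;

// Consumer, mirroring new_region_try_secondary_free_list(): returns -1
// once the producer is done and the list has been drained.
int take_region() {
  std::unique_lock<std::mutex> x(list_lock);
  while (!secondary_free_list.empty() || free_regions_coming) {
    if (!secondary_free_list.empty()) {
      int region = secondary_free_list.front();
      secondary_free_list.pop_front();
      return region;
    }
    cv.wait(x);  // wait until (a) no more regions coming or (b) regions added
  }
  return -1;  // could not allocate from the secondary free list
}

// Producer side: the cleanup thread publishes a region and wakes waiters.
void publish_region(int region) {
  std::lock_guard<std::mutex> g(list_lock);
  secondary_free_list.push_back(region);
  cv.notify_all();
}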
 450 
 451 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
 452   assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,


1466   // 32-bit size_t's.
1467   double used_after_gc_d = (double) used_after_gc;
1468   double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;
1469   double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;
1470 
1471   // Let's make sure that they are both under the max heap size, which
1472   // by default will make them fit into a size_t.
1473   double desired_capacity_upper_bound = (double) max_heap_size;
1474   minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
1475                                     desired_capacity_upper_bound);
1476   maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
1477                                     desired_capacity_upper_bound);
1478 
1479   // We can now safely turn them into size_t's.
1480   size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;
1481   size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;
1482 
1483   // This assert only makes sense here, before we adjust them
1484   // with respect to the min and max heap size.
1485   assert(minimum_desired_capacity <= maximum_desired_capacity,
1486          err_msg("minimum_desired_capacity = " SIZE_FORMAT ", "
1487                  "maximum_desired_capacity = " SIZE_FORMAT,
1488                  minimum_desired_capacity, maximum_desired_capacity));
1489 
1490   // Should not be greater than the heap max size. No need to adjust
1491   // it with respect to the heap min size as it's a lower bound (i.e.,
1492   // we'll try to make the capacity larger than it, not smaller).
1493   minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
1494   // Should not be less than the heap min size. No need to adjust it
1495   // with respect to the heap max size as it's an upper bound (i.e.,
1496   // we'll try to make the capacity smaller than it, not greater).
1497   maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size);
1498 
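
The clamping above is easiest to see with concrete numbers. Below is a self-contained sketch of the same arithmetic; the percentage definitions come from the elided start of this function, and the flag values and heap sizes are assumptions chosen purely for illustration (std::min/std::max stand in for MIN2/MAX2):

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  // Assumed flag values (-XX:MinHeapFreeRatio=40, -XX:MaxHeapFreeRatio=70).
  const double maximum_used_percentage = (100.0 - 40.0) / 100.0;  // 0.6
  const double minimum_used_percentage = (100.0 - 70.0) / 100.0;  // 0.3

  const size_t used_after_gc = size_t(600) << 20;   // 600 MB live after Full GC
  const size_t min_heap_size = size_t(512) << 20;   // assumed -Xms
  const size_t max_heap_size = size_t(2048) << 20;  // assumed -Xmx

  // used / used_percentage is the capacity that would land on that ratio.
  double min_desired_d = used_after_gc / maximum_used_percentage;  // 1000 MB
  double max_desired_d = used_after_gc / minimum_used_percentage;  // 2000 MB

  // Bound by the max heap size while still in doubles, then cast, as above.
  min_desired_d = std::min(min_desired_d, (double) max_heap_size);
  max_desired_d = std::min(max_desired_d, (double) max_heap_size);
  size_t minimum_desired_capacity = (size_t) min_desired_d;
  size_t maximum_desired_capacity = (size_t) max_desired_d;

  // Final clamp into [min_heap_size, max_heap_size].
  minimum_desired_capacity = std::min(minimum_desired_capacity, max_heap_size);
  maximum_desired_capacity = std::max(maximum_desired_capacity, min_heap_size);

  // With these inputs: expand if capacity fell below 1000 MB, shrink above 2000 MB.
  std::printf("desired capacity range: [%zu MB, %zu MB]\n",
              minimum_desired_capacity >> 20, maximum_desired_capacity >> 20);
  return 0;
}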
1499   if (capacity_after_gc < minimum_desired_capacity) {
1500     // Don't expand unless it's significant
1501     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
1502     ergo_verbose4(ErgoHeapSizing,
1503                   "attempt heap expansion",
1504                   ergo_format_reason("capacity lower than "
1505                                      "min desired capacity after Full GC")
1506                   ergo_format_byte("capacity")
1507                   ergo_format_byte("occupancy")


2126       assert(chr->is_continues_humongous(), "sanity");
2127       chr->reset_gc_time_stamp();
2128     }
2129   }
2130 }
2131 
2132 #ifndef PRODUCT
2133 
2134 class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
2135 private:
2136   unsigned _gc_time_stamp;
2137   bool _failures;
2138 
2139 public:
2140   CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
2141     _gc_time_stamp(gc_time_stamp), _failures(false) { }
2142 
2143   virtual bool doHeapRegion(HeapRegion* hr) {
2144     unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
2145     if (_gc_time_stamp != region_gc_time_stamp) {
2146       gclog_or_tty->print_cr("Region " HR_FORMAT " has GC time stamp = %d, "
2147                              "expected %d", HR_FORMAT_PARAMS(hr),
2148                              region_gc_time_stamp, _gc_time_stamp);
2149       _failures = true;
2150     }
2151     return false;
2152   }
2153 
2154   bool failures() { return _failures; }
2155 };
2156 
2157 void G1CollectedHeap::check_gc_time_stamps() {
2158   CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
2159   heap_region_iterate(&cl);
2160   guarantee(!cl.failures(), "all GC time stamps should have been reset");
2161 }
2162 #endif // PRODUCT
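
CheckGCTimeStampsHRClosure illustrates the HeapRegionClosure convention used throughout this file: heap_region_iterate() applies doHeapRegion() to each region and terminates early only when it returns true, so verification closures return false and latch failures instead, ensuring every region is visited. A condensed sketch of that protocol (the types are illustrative stand-ins, not the HotSpot classes):

#include <vector>

struct Region { unsigned gc_time_stamp; };

struct RegionClosure {
  virtual ~RegionClosure() {}
  virtual bool doHeapRegion(Region* r) = 0;  // true => terminate iteration
};

void heap_region_iterate(std::vector<Region>& regions, RegionClosure* cl) {
  for (size_t i = 0; i < regions.size(); ++i) {
    if (cl->doHeapRegion(&regions[i])) {
      return;  // closure asked to stop early
    }
  }
}

// Like CheckGCTimeStampsHRClosure: record failures but return false so
// every region is still checked before the caller inspects failures().
struct CheckStampsClosure : RegionClosure {
  unsigned expected;
  bool     failures;
  explicit CheckStampsClosure(unsigned e) : expected(e), failures(false) {}
  virtual bool doHeapRegion(Region* r) {
    if (r->gc_time_stamp != expected) failures = true;
    return false;
  }
};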
2163 
2164 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
2165                                                  DirtyCardQueue* into_cset_dcq,
2166                                                  bool concurrent,


2735 private:
2736   G1CollectedHeap* _g1h;
2737   VerifyOption     _vo;
2738   bool             _failures;
2739 public:
2740   // _vo == UsePrevMarking -> use "prev" marking information,
2741   // _vo == UseNextMarking -> use "next" marking information,
2742   // _vo == UseMarkWord    -> use mark word from object header.
2743   VerifyRootsClosure(VerifyOption vo) :
2744     _g1h(G1CollectedHeap::heap()),
2745     _vo(vo),
2746     _failures(false) { }
2747 
2748   bool failures() { return _failures; }
2749 
2750   template <class T> void do_oop_nv(T* p) {
2751     T heap_oop = oopDesc::load_heap_oop(p);
2752     if (!oopDesc::is_null(heap_oop)) {
2753       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2754       if (_g1h->is_obj_dead_cond(obj, _vo)) {
2755         gclog_or_tty->print_cr("Root location " PTR_FORMAT " "
2756                                "points to dead obj " PTR_FORMAT, p2i(p), p2i(obj));
2757         if (_vo == VerifyOption_G1UseMarkWord) {
2758           gclog_or_tty->print_cr("  Mark word: " INTPTR_FORMAT, (intptr_t)obj->mark());
2759         }
2760         obj->print_on(gclog_or_tty);
2761         _failures = true;
2762       }
2763     }
2764   }
2765 
2766   void do_oop(oop* p)       { do_oop_nv(p); }
2767   void do_oop(narrowOop* p) { do_oop_nv(p); }
2768 };
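
do_oop_nv() above is templated on the slot type so one body handles both full-width oop* slots and compressed narrowOop* slots; load, null-check, then decode is the standard sequence for reading a heap reference. A standalone sketch of that shape, assuming a 64-bit heap and an illustrative base-and-shift encoding (none of these names are HotSpot API):

#include <cstdint>

typedef uint64_t oop_t;         // stands in for oop (a full heap address)
typedef uint32_t narrow_oop_t;  // stands in for narrowOop (compressed form)

static uint64_t  heap_base = 0; // assumed compressed-oop base
static const int oop_shift = 3; // assumed compressed-oop shift

inline oop_t decode(narrow_oop_t v) { return heap_base + ((uint64_t)v << oop_shift); }
inline oop_t decode(oop_t v)        { return v; }  // uncompressed: already an address

template <class T>
void visit_field(T* p) {
  T field = *p;                 // oopDesc::load_heap_oop(p)
  if (field != 0) {             // !oopDesc::is_null(heap_oop)
    oop_t obj = decode(field);  // oopDesc::decode_heap_oop_not_null(...)
    (void)obj;                  // ...verify or process obj here...
  }
}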
2769 
2770 class G1VerifyCodeRootOopClosure: public OopClosure {
2771   G1CollectedHeap* _g1h;
2772   OopClosure* _root_cl;
2773   nmethod* _nm;
2774   VerifyOption _vo;
2775   bool _failures;
2776 
2777   template <class T> void do_oop_work(T* p) {
2778     // First verify that this root is live


2785 
2786     // Don't check the code roots during marking verification in a full GC
2787     if (_vo == VerifyOption_G1UseMarkWord) {
2788       return;
2789     }
2790 
2791     // Now verify that the current nmethod (which contains p) is
2792     // in the code root list of the heap region containing the
2793     // object referenced by p.
2794 
2795     T heap_oop = oopDesc::load_heap_oop(p);
2796     if (!oopDesc::is_null(heap_oop)) {
2797       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2798 
2799       // Now fetch the region containing the object
2800       HeapRegion* hr = _g1h->heap_region_containing(obj);
2801       HeapRegionRemSet* hrrs = hr->rem_set();
2802       // Verify that the strong code root list for this region
2803       // contains the nmethod
2804       if (!hrrs->strong_code_roots_list_contains(_nm)) {
2805         gclog_or_tty->print_cr("Code root location " PTR_FORMAT " "
2806                                "from nmethod " PTR_FORMAT " not in strong "
2807                                "code roots for region [" PTR_FORMAT "," PTR_FORMAT ")",
2808                                p2i(p), p2i(_nm), p2i(hr->bottom()), p2i(hr->end()));
2809         _failures = true;
2810       }
2811     }
2812   }
2813 
2814 public:
2815   G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo):
2816     _g1h(g1h), _root_cl(root_cl), _vo(vo), _nm(NULL), _failures(false) {}
2817 
2818   void do_oop(oop* p) { do_oop_work(p); }
2819   void do_oop(narrowOop* p) { do_oop_work(p); }
2820 
2821   void set_nmethod(nmethod* nm) { _nm = nm; }
2822   bool failures() { return _failures; }
2823 };
2824 
2825 class G1VerifyCodeRootBlobClosure: public CodeBlobClosure {
2826   G1VerifyCodeRootOopClosure* _oop_cl;
2827 


2934   VerifyRegionClosure(bool par, VerifyOption vo)
2935     : _par(par),
2936       _vo(vo),
2937       _failures(false) {}
2938 
2939   bool failures() {
2940     return _failures;
2941   }
2942 
2943   bool doHeapRegion(HeapRegion* r) {
2944     if (!r->is_continues_humongous()) {
2945       bool failures = false;
2946       r->verify(_vo, &failures);
2947       if (failures) {
2948         _failures = true;
2949       } else {
2950         VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
2951         r->object_iterate(&not_dead_yet_cl);
2952         if (_vo != VerifyOption_G1UseNextMarking) {
2953           if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
2954             gclog_or_tty->print_cr("[" PTR_FORMAT "," PTR_FORMAT "] "
2955                                    "max_live_bytes " SIZE_FORMAT " "
2956                                    "< calculated " SIZE_FORMAT,
2957                                    p2i(r->bottom()), p2i(r->end()),
2958                                    r->max_live_bytes(),
2959                                    not_dead_yet_cl.live_bytes());
2960             _failures = true;
2961           }
2962         } else {
2963           // When vo == UseNextMarking we cannot currently do a sanity
2964           // check on the live bytes as the calculation has not been
2965           // finalized yet.
2966         }
2967       }
2968     }
2969     return false; // continue the region iteration; failures are recorded in _failures
2970   }
2971 };
2972 
2973 // This is the task used for parallel verification of the heap regions
2974 
2975 class G1ParVerifyTask: public AbstractGangTask {
2976 private:


3224   if (G1SummarizeConcMark) {
3225     concurrent_mark()->print_summary_info();
3226   }
3227   g1_policy()->print_yg_surv_rate_info();
3228 }
3229 
3230 #ifndef PRODUCT
3231 // Helpful for debugging RSet issues.
3232 
3233 class PrintRSetsClosure : public HeapRegionClosure {
3234 private:
3235   const char* _msg;
3236   size_t _occupied_sum;
3237 
3238 public:
3239   bool doHeapRegion(HeapRegion* r) {
3240     HeapRegionRemSet* hrrs = r->rem_set();
3241     size_t occupied = hrrs->occupied();
3242     _occupied_sum += occupied;
3243 
3244     gclog_or_tty->print_cr("Printing RSet for region " HR_FORMAT,
3245                            HR_FORMAT_PARAMS(r));
3246     if (occupied == 0) {
3247       gclog_or_tty->print_cr("  RSet is empty");
3248     } else {
3249       hrrs->print();
3250     }
3251     gclog_or_tty->print_cr("----------");
3252     return false;
3253   }
3254 
3255   PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
3256     gclog_or_tty->cr();
3257     gclog_or_tty->print_cr("========================================");
3258     gclog_or_tty->print_cr("%s", msg);
3259     gclog_or_tty->cr();
3260   }
3261 
3262   ~PrintRSetsClosure() {
3263     gclog_or_tty->print_cr("Occupied Sum: " SIZE_FORMAT, _occupied_sum);
3264     gclog_or_tty->print_cr("========================================");
3265     gclog_or_tty->cr();
3266   }
3267 };
3268 
3269 void G1CollectedHeap::print_cset_rsets() {
3270   PrintRSetsClosure cl("Printing CSet RSets");
3271   collection_set_iterate(&cl);
3272 }
3273 
3274 void G1CollectedHeap::print_all_rsets() {
3275   PrintRSetsClosure cl("Printing All RSets");
3276   heap_region_iterate(&cl);
3277 }
3278 #endif // PRODUCT
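
Note how PrintRSetsClosure brackets its output with RAII: the constructor prints the banner before collection_set_iterate()/heap_region_iterate() runs, and the destructor prints the accumulated occupied sum when the closure leaves scope, so the callers need no explicit epilogue. A stripped-down sketch of the idiom (names illustrative):

#include <cstdio>

class BannerAndTotal {
  const char* _msg;
  size_t      _sum;
public:
  explicit BannerAndTotal(const char* msg) : _msg(msg), _sum(0) {
    std::printf("========================================\n%s\n", msg);
  }
  void add(size_t n) { _sum += n; }
  ~BannerAndTotal() {
    std::printf("Occupied Sum: %zu\n========================================\n", _sum);
  }
};

void print_three() {
  BannerAndTotal cl("Printing All RSets");  // header prints here
  for (size_t occ = 1; occ <= 3; ++occ) {
    cl.add(occ);                            // body of the iteration
  }
}                                           // footer and sum print here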
3279 
3280 G1CollectedHeap* G1CollectedHeap::heap() {
3281   CollectedHeap* heap = Universe::heap();
3282   assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
3283   assert(heap->kind() == CollectedHeap::G1CollectedHeap, "Not a G1CollectedHeap");


4061 }
4062 
4063 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
4064   _evac_failure_scan_stack->push(obj);
4065 }
4066 
4067 void G1CollectedHeap::drain_evac_failure_scan_stack() {
4068   assert(_evac_failure_scan_stack != NULL, "precondition");
4069 
4070   while (_evac_failure_scan_stack->length() > 0) {
4071      oop obj = _evac_failure_scan_stack->pop();
4072      _evac_failure_closure->set_region(heap_region_containing(obj));
4073      obj->oop_iterate_backwards(_evac_failure_closure);
4074   }
4075 }
4076 
4077 oop
4078 G1CollectedHeap::handle_evacuation_failure_par(G1ParScanThreadState* _par_scan_state,
4079                                                oop old) {
4080   assert(obj_in_cs(old),
4081          err_msg("obj: " PTR_FORMAT " should still be in the CSet",
4082                  p2i(old)));
4083   markOop m = old->mark();
4084   oop forward_ptr = old->forward_to_atomic(old);
4085   if (forward_ptr == NULL) {
4086     // Forward-to-self succeeded.
4087     assert(_par_scan_state != NULL, "par scan state");
4088     OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
4089     uint queue_num = _par_scan_state->queue_num();
4090 
4091     _evacuation_failed = true;
4092     _evacuation_failed_info_array[queue_num].register_copy_failure(old->size());
4093     if (_evac_failure_closure != cl) {
4094       MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
4095       assert(!_drain_in_progress,
4096              "Should only be true while someone holds the lock.");
4097       // Set the global evac-failure closure to the current thread's.
4098       assert(_evac_failure_closure == NULL, "Or locking has failed.");
4099       set_evac_failure_closure(cl);
4100       // Now do the common part.
4101       handle_evacuation_failure_common(old, m);
4102       // Reset to NULL.
4103       set_evac_failure_closure(NULL);
4104     } else {
4105       // The lock is already held, and this is recursive.
4106       assert(_drain_in_progress, "This should only be the recursive case.");
4107       handle_evacuation_failure_common(old, m);
4108     }
4109     return old;
4110   } else {
4111     // Forward-to-self failed. Either someone else managed to allocate
4112     // space for this object (old != forward_ptr) or they beat us in
4113     // self-forwarding it (old == forward_ptr).
4114     assert(old == forward_ptr || !obj_in_cs(forward_ptr),
4115            err_msg("obj: " PTR_FORMAT " forwarded to: " PTR_FORMAT " "
4116                    "should not be in the CSet",
4117                    p2i(old), p2i(forward_ptr)));
4118     return forward_ptr;
4119   }
4120 }
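
forward_to_atomic() is the CAS that decides ownership here: exactly one thread installs the self-forwarding pointer and sees NULL back; every racing thread instead gets the value the winner (or an earlier copier) installed. A minimal sketch of that contract using std::atomic; the real implementation CASes the object's mark word rather than a separate field, and all names below are illustrative:

#include <atomic>
#include <cstddef>

struct Obj {
  std::atomic<Obj*> forwardee;      // stand-in for the mark-word forwarding state
  Obj() : forwardee((Obj*)NULL) {}
};

// Mirrors old->forward_to_atomic(old): NULL means we installed the
// self-forward; otherwise the return value is whoever got there first.
Obj* forward_to_atomic_self(Obj* old_obj) {
  Obj* expected = NULL;
  if (old_obj->forwardee.compare_exchange_strong(expected, old_obj)) {
    return NULL;      // we won: caller handles the evacuation failure
  }
  return expected;    // lost the race: object was copied or self-forwarded
}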
4121 
4122 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
4123   preserve_mark_if_necessary(old, m);
4124 
4125   HeapRegion* r = heap_region_containing(old);
4126   if (!r->evacuation_failed()) {
4127     r->set_evacuation_failed(true);
4128     _hr_printer.evac_failure(r);
4129   }
4130 
4131   push_on_evac_failure_scan_stack(old);
4132 
4133   if (!_drain_in_progress) {
4134     // prevent recursion in copy_to_survivor_space()
4135     _drain_in_progress = true;


4488     _initial_string_table_size = StringTable::the_table()->table_size();
4489     _initial_symbol_table_size = SymbolTable::the_table()->table_size();
4490     if (process_strings) {
4491       StringTable::clear_parallel_claimed_index();
4492     }
4493     if (process_symbols) {
4494       SymbolTable::clear_parallel_claimed_index();
4495     }
4496   }
4497 
4498   ~G1StringSymbolTableUnlinkTask() {
4499     guarantee(!_process_strings || StringTable::parallel_claimed_index() >= _initial_string_table_size,
4500               err_msg("claim value %d after unlink less than initial string table size %d",
4501                       StringTable::parallel_claimed_index(), _initial_string_table_size));
4502     guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
4503               err_msg("claim value %d after unlink less than initial symbol table size %d",
4504                       SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));
4505 
4506     if (G1TraceStringSymbolTableScrubbing) {
4507       gclog_or_tty->print_cr("Cleaned string and symbol table, "
4508                              "strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, "
4509                              "symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",
4510                              strings_processed(), strings_removed(),
4511                              symbols_processed(), symbols_removed());
4512     }
4513   }
4514 
4515   void work(uint worker_id) {
4516     int strings_processed = 0;
4517     int strings_removed = 0;
4518     int symbols_processed = 0;
4519     int symbols_removed = 0;
4520     if (_process_strings) {
4521       StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
4522       Atomic::add(strings_processed, &_strings_processed);
4523       Atomic::add(strings_removed, &_strings_removed);
4524     }
4525     if (_process_symbols) {
4526       SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
4527       Atomic::add(symbols_processed, &_symbols_processed);
4528       Atomic::add(symbols_removed, &_symbols_removed);
4529     }
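
work() counts into plain locals and publishes each worker's totals with a single Atomic::add per counter, rather than contending on the shared fields once per table entry. A standalone sketch of that aggregation pattern (workload and names are illustrative):

#include <atomic>
#include <thread>

static std::atomic<int> total_processed(0);
static std::atomic<int> total_removed(0);

void worker(int begin, int end) {
  int processed = 0;
  int removed = 0;
  for (int i = begin; i < end; ++i) {
    ++processed;
    if (i % 3 == 0) ++removed;   // stand-in for "entry was dead, unlinked"
  }
  // One atomic add per counter per worker, like Atomic::add(..., &_strings_processed).
  total_processed.fetch_add(processed);
  total_removed.fetch_add(removed);
}

int main() {
  std::thread t1(worker, 0, 500), t2(worker, 500, 1000);
  t1.join(); t2.join();
  return (total_processed.load() == 1000) ? 0 : 1;
}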


5612     ct_bs->verify_g1_young_region(mr);
5613   } else {
5614     ct_bs->verify_dirty_region(mr);
5615   }
5616 }
5617 
5618 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
5619   G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
5620   for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
5621     verify_dirty_region(hr);
5622   }
5623 }
5624 
5625 void G1CollectedHeap::verify_dirty_young_regions() {
5626   verify_dirty_young_list(_young_list->first_region());
5627 }
5628 
5629 bool G1CollectedHeap::verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
5630                                                HeapWord* tams, HeapWord* end) {
5631   guarantee(tams <= end,
5632             err_msg("tams: " PTR_FORMAT " end: " PTR_FORMAT, p2i(tams), p2i(end)));
5633   HeapWord* result = bitmap->getNextMarkedWordAddress(tams, end);
5634   if (result < end) {
5635     gclog_or_tty->cr();
5636     gclog_or_tty->print_cr("## wrong marked address on %s bitmap: " PTR_FORMAT,
5637                            bitmap_name, p2i(result));
5638     gclog_or_tty->print_cr("## %s tams: " PTR_FORMAT " end: " PTR_FORMAT,
5639                            bitmap_name, p2i(tams), p2i(end));
5640     return false;
5641   }
5642   return true;
5643 }
5644 
5645 bool G1CollectedHeap::verify_bitmaps(const char* caller, HeapRegion* hr) {
5646   CMBitMapRO* prev_bitmap = concurrent_mark()->prevMarkBitMap();
5647   CMBitMapRO* next_bitmap = (CMBitMapRO*) concurrent_mark()->nextMarkBitMap();
5648 
5649   HeapWord* bottom = hr->bottom();
5650   HeapWord* ptams  = hr->prev_top_at_mark_start();
5651   HeapWord* ntams  = hr->next_top_at_mark_start();
5652   HeapWord* end    = hr->end();
5653 
5654   bool res_p = verify_no_bits_over_tams("prev", prev_bitmap, ptams, end);
5655 
5656   bool res_n = true;
5657   // We reset mark_in_progress() before we reset _cmThread->in_progress(), and in that window
5658   // the next bitmap is being cleared concurrently. Thus, we cannot verify the bitmap
5659   // if we happen to be in that window.
5660   if (mark_in_progress() || !_cmThread->in_progress()) {
5661     res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
5662   }
5663   if (!res_p || !res_n) {
5664     gclog_or_tty->print_cr("#### Bitmap verification failed for " HR_FORMAT,
5665                            HR_FORMAT_PARAMS(hr));
5666     gclog_or_tty->print_cr("#### Caller: %s", caller);
5667     return false;
5668   }
5669   return true;
5670 }
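
The invariant behind verify_no_bits_over_tams() is that marking only covers objects allocated below the region's TAMS (top-at-mark-start), so any mark bit set in [tams, end) indicates corruption. A toy sketch of the check, with the bitmap reduced to a bool-per-word vector purely for illustration:

#include <vector>

bool no_bits_over_tams(const std::vector<bool>& bitmap, size_t tams, size_t end) {
  for (size_t addr = tams; addr < end; ++addr) {  // getNextMarkedWordAddress(tams, end)
    if (bitmap[addr]) {
      return false;  // a marked word at or above TAMS: verification failure
    }
  }
  return true;
}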
5671 
5672 void G1CollectedHeap::check_bitmaps(const char* caller, HeapRegion* hr) {
5673   if (!G1VerifyBitmaps) return;
5674 
5675   guarantee(verify_bitmaps(caller, hr), "bitmap verification");
5676 }
5677 
5678 class G1VerifyBitmapClosure : public HeapRegionClosure {
5679 private:
5680   const char* _caller;
5681   G1CollectedHeap* _g1h;
5682   bool _failures;
5683 
5684 public:


5943     //
5944     // It is not required to check whether the object has been found dead by marking
5945     // or not; in fact, doing so would prevent reclamation within a concurrent cycle, as
5946     // all objects allocated during that time are considered live.
5947     // SATB marking is even more conservative than the remembered set.
5948     // So if at this point in the collection there is no remembered set entry,
5949     // nobody has a reference to the humongous object.
5950     // At the start of collection we flush all refinement logs, and remembered sets
5951     // are completely up-to-date with respect to references to the humongous object.
5952     //
5953     // Other implementation considerations:
5954     // - never consider object arrays at this time because cleaning up their
5955     // remembered sets would require considerable effort. This is
5956     // required because stale remembered sets might reference locations that
5957     // are currently allocated into.
5958     uint region_idx = r->hrm_index();
5959     if (!g1h->is_humongous_reclaim_candidate(region_idx) ||
5960         !r->rem_set()->is_empty()) {
5961 
5962       if (G1TraceEagerReclaimHumongousObjects) {
5963         gclog_or_tty->print_cr("Live humongous region %u size " SIZE_FORMAT " start " PTR_FORMAT " length %u with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
5964                                region_idx,
5965                                (size_t)obj->size() * HeapWordSize,
5966                                p2i(r->bottom()),
5967                                r->region_num(),
5968                                r->rem_set()->occupied(),
5969                                r->rem_set()->strong_code_roots_list_length(),
5970                                next_bitmap->isMarked(r->bottom()),
5971                                g1h->is_humongous_reclaim_candidate(region_idx),
5972                                obj->is_typeArray()
5973                               );
5974       }
5975 
5976       return false;
5977     }
5978 
5979     guarantee(obj->is_typeArray(),
5980               err_msg("Only eagerly reclaiming type arrays is supported, but the object "
5981                       PTR_FORMAT " is not.",
5982                       p2i(r->bottom())));
5983 
5984     if (G1TraceEagerReclaimHumongousObjects) {
5985       gclog_or_tty->print_cr("Dead humongous region %u size " SIZE_FORMAT " start " PTR_FORMAT " length %u with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
5986                              region_idx,
5987                              (size_t)obj->size() * HeapWordSize,
5988                              p2i(r->bottom()),
5989                              r->region_num(),
5990                              r->rem_set()->occupied(),
5991                              r->rem_set()->strong_code_roots_list_length(),
5992                              next_bitmap->isMarked(r->bottom()),
5993                              g1h->is_humongous_reclaim_candidate(region_idx),
5994                              obj->is_typeArray()
5995                             );
5996     }
5997     // Need to clear mark bit of the humongous object if already set.
5998     if (next_bitmap->isMarked(r->bottom())) {
5999       next_bitmap->clear(r->bottom());
6000     }
6001     _freed_bytes += r->used();
6002     r->set_containing_set(NULL);
6003     _humongous_regions_removed.increment(1u, r->capacity());
6004     g1h->free_humongous_region(r, _free_region_list, false);
6005 
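
Condensing the test above: a humongous region is freed eagerly only if it was registered as a reclaim candidate when the pause started and its remembered set is still empty; candidates are guaranteed to be primitive arrays, so no reference fields need scrubbing before the region is freed. A sketch of that predicate (the struct fields are illustrative stand-ins for the HeapRegion/G1CollectedHeap queries used above):

struct HumongousRegion {
  bool reclaim_candidate;   // g1h->is_humongous_reclaim_candidate(idx)
  bool remset_empty;        // r->rem_set()->is_empty()
  bool is_type_array;       // obj->is_typeArray(); guaranteed for candidates
};

bool can_reclaim_eagerly(const HumongousRegion& r) {
  if (!r.reclaim_candidate || !r.remset_empty) {
    return false;           // treated as live and left in place
  }
  // Candidates are only ever primitive arrays, so a dead one holds no
  // references that other remembered sets could still depend on.
  return r.is_type_array;
}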


6119     }
6120   }
6121 
6122   if (G1ConcRegionFreeingVerbose) {
6123     gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
6124                            "done waiting for free regions");
6125   }
6126 }
6127 
6128 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
6129   _young_list->push_region(hr);
6130 }
6131 
6132 class NoYoungRegionsClosure: public HeapRegionClosure {
6133 private:
6134   bool _success;
6135 public:
6136   NoYoungRegionsClosure() : _success(true) { }
6137   bool doHeapRegion(HeapRegion* r) {
6138     if (r->is_young()) {
6139       gclog_or_tty->print_cr("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
6140                              p2i(r->bottom()), p2i(r->end()));
6141       _success = false;
6142     }
6143     return false;
6144   }
6145   bool success() { return _success; }
6146 };
6147 
6148 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
6149   bool ret = _young_list->check_list_empty(check_sample);
6150 
6151   if (check_heap) {
6152     NoYoungRegionsClosure closure;
6153     heap_region_iterate(&closure);
6154     ret = ret && closure.success();
6155   }
6156 
6157   return ret;
6158 }
6159 


6248   size_t total_used() {
6249     return _total_used;
6250   }
6251 };
6252 
6253 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
6254   assert_at_safepoint(true /* should_be_vm_thread */);
6255 
6256   if (!free_list_only) {
6257     _young_list->empty_list();
6258   }
6259 
6260   RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
6261   heap_region_iterate(&cl);
6262 
6263   if (!free_list_only) {
6264     _allocator->set_used(cl.total_used());
6265   }
6266   assert(_allocator->used_unlocked() == recalculate_used(),
6267          err_msg("inconsistent _allocator->used_unlocked(), "
6268                  "value: " SIZE_FORMAT " recalculated: " SIZE_FORMAT,
6269                  _allocator->used_unlocked(), recalculate_used()));
6270 }
6271 
6272 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
6273   _refine_cte_cl->set_concurrent(concurrent);
6274 }
6275 
6276 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
6277   HeapRegion* hr = heap_region_containing(p);
6278   return hr->is_in(p);
6279 }
6280 
6281 // Methods for the mutator alloc region
6282 
6283 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
6284                                                       bool force) {
6285   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6286   assert(!force || g1_policy()->can_expand_young_list(),
6287          "if force is true we should be able to expand the young list");
6288   bool young_list_full = g1_policy()->is_young_list_full();


6468   // Finally, make sure that the region accounting in the lists is
6469   // consistent with what we see in the heap.
6470 
6471   VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_hrm);
6472   heap_region_iterate(&cl);
6473   cl.verify_counts(&_old_set, &_humongous_set, &_hrm);
6474 }
6475 
6476 // Optimized nmethod scanning
6477 
6478 class RegisterNMethodOopClosure: public OopClosure {
6479   G1CollectedHeap* _g1h;
6480   nmethod* _nm;
6481 
6482   template <class T> void do_oop_work(T* p) {
6483     T heap_oop = oopDesc::load_heap_oop(p);
6484     if (!oopDesc::is_null(heap_oop)) {
6485       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
6486       HeapRegion* hr = _g1h->heap_region_containing(obj);
6487       assert(!hr->is_continues_humongous(),
6488              err_msg("trying to add code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
6489                      " starting at " HR_FORMAT,
6490                      p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
6491 
6492       // HeapRegion::add_strong_code_root_locked() avoids adding duplicate entries.
6493       hr->add_strong_code_root_locked(_nm);
6494     }
6495   }
6496 
6497 public:
6498   RegisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
6499     _g1h(g1h), _nm(nm) {}
6500 
6501   void do_oop(oop* p)       { do_oop_work(p); }
6502   void do_oop(narrowOop* p) { do_oop_work(p); }
6503 };
6504 
6505 class UnregisterNMethodOopClosure: public OopClosure {
6506   G1CollectedHeap* _g1h;
6507   nmethod* _nm;
6508 
6509   template <class T> void do_oop_work(T* p) {
6510     T heap_oop = oopDesc::load_heap_oop(p);
6511     if (!oopDesc::is_null(heap_oop)) {
6512       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
6513       HeapRegion* hr = _g1h->heap_region_containing(obj);
6514       assert(!hr->is_continues_humongous(),
6515              err_msg("trying to remove code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
6516                      " starting at " HR_FORMAT,
6517                      p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
6518 
6519       hr->remove_strong_code_root(_nm);
6520     }
6521   }
6522 
6523 public:
6524   UnregisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
6525     _g1h(g1h), _nm(nm) {}
6526 
6527   void do_oop(oop* p)       { do_oop_work(p); }
6528   void do_oop(narrowOop* p) { do_oop_work(p); }
6529 };
6530 
6531 void G1CollectedHeap::register_nmethod(nmethod* nm) {
6532   CollectedHeap::register_nmethod(nm);
6533 
6534   guarantee(nm != NULL, "sanity");
6535   RegisterNMethodOopClosure reg_cl(this, nm);
6536   nm->oops_do(&reg_cl);




 179   _length = 0;
 180 
 181   empty_list(_survivor_head);
 182   _survivor_head = NULL;
 183   _survivor_tail = NULL;
 184   _survivor_length = 0;
 185 
 186   _last_sampled_rs_lengths = 0;
 187 
 188   assert(check_list_empty(false), "just making sure...");
 189 }
 190 
 191 bool YoungList::check_list_well_formed() {
 192   bool ret = true;
 193 
 194   uint length = 0;
 195   HeapRegion* curr = _head;
 196   HeapRegion* last = NULL;
 197   while (curr != NULL) {
 198     if (!curr->is_young()) {
 199       gclog_or_tty->print_cr("### YOUNG REGION " PTR_FORMAT "-" PTR_FORMAT " "
 200                              "incorrectly tagged (y: %d, surv: %d)",
 201                              p2i(curr->bottom()), p2i(curr->end()),
 202                              curr->is_young(), curr->is_survivor());
 203       ret = false;
 204     }
 205     ++length;
 206     last = curr;
 207     curr = curr->get_next_young_region();
 208   }
 209   ret = ret && (length == _length);
 210 
 211   if (!ret) {
 212     gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
 213     gclog_or_tty->print_cr("###   list has %u entries, _length is %u",
 214                            length, _length);
 215   }
 216 
 217   return ret;
 218 }
 219 


 308   // Don't clear the survivor list handles until the start of
 309   // the next evacuation pause - we need it in order to re-tag
 310   // the survivor regions from this evacuation pause as 'young'
 311   // at the start of the next.
 312 
 313   _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);
 314 
 315   assert(check_list_well_formed(), "young list should be well formed");
 316 }
 317 
 318 void YoungList::print() {
 319   HeapRegion* lists[] = {_head,   _survivor_head};
 320   const char* names[] = {"YOUNG", "SURVIVOR"};
 321 
 322   for (uint list = 0; list < ARRAY_SIZE(lists); ++list) {
 323     gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
 324     HeapRegion *curr = lists[list];
 325     if (curr == NULL)
 326       gclog_or_tty->print_cr("  empty");
 327     while (curr != NULL) {
 328       gclog_or_tty->print_cr("  " HR_FORMAT ", P: " PTR_FORMAT ", N: " PTR_FORMAT ", age: %4d",
 329                              HR_FORMAT_PARAMS(curr),
 330                              p2i(curr->prev_top_at_mark_start()),
 331                              p2i(curr->next_top_at_mark_start()),
 332                              curr->age_in_surv_rate_group_cond());
 333       curr = curr->get_next_young_region();
 334     }
 335   }
 336 
 337   gclog_or_tty->cr();
 338 }
 339 
 340 void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
 341   HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
 342 }
 343 
 344 void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
 345   // The from card cache is not the memory that is actually committed. So we cannot
 346   // take advantage of the zero_filled parameter.
 347   reset_from_card_cache(start_idx, num_regions);
 348 }


 412 HeapRegion*
 413 G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
 414   MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
 415   while (!_secondary_free_list.is_empty() || free_regions_coming()) {
 416     if (!_secondary_free_list.is_empty()) {
 417       if (G1ConcRegionFreeingVerbose) {
 418         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 419                                "secondary_free_list has %u entries",
 420                                _secondary_free_list.length());
 421       }
 422       // It looks as if there are free regions available on the
 423       // secondary_free_list. Let's move them to the free_list and try
 424       // again to allocate from it.
 425       append_secondary_free_list();
 426 
 427       assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not "
 428              "empty we should have moved at least one entry to the free_list");
 429       HeapRegion* res = _hrm.allocate_free_region(is_old);
 430       if (G1ConcRegionFreeingVerbose) {
 431         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 432                                "allocated " HR_FORMAT " from secondary_free_list",
 433                                HR_FORMAT_PARAMS(res));
 434       }
 435       return res;
 436     }
 437 
 438     // Wait here until we get notified either when (a) there are no
 439     // more free regions coming or (b) some regions have been moved on
 440     // the secondary_free_list.
 441     SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
 442   }
 443 
 444   if (G1ConcRegionFreeingVerbose) {
 445     gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 446                            "could not allocate from secondary_free_list");
 447   }
 448   return NULL;
 449 }
 450 
 451 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
 452   assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,


1466   // 32-bit size_t's.
1467   double used_after_gc_d = (double) used_after_gc;
1468   double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;
1469   double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;
1470 
1471   // Let's make sure that they are both under the max heap size, which
1472   // by default will make them fit into a size_t.
1473   double desired_capacity_upper_bound = (double) max_heap_size;
1474   minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
1475                                     desired_capacity_upper_bound);
1476   maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
1477                                     desired_capacity_upper_bound);
1478 
1479   // We can now safely turn them into size_t's.
1480   size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;
1481   size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;
1482 
1483   // This assert only makes sense here, before we adjust them
1484   // with respect to the min and max heap size.
1485   assert(minimum_desired_capacity <= maximum_desired_capacity,
1486          err_msg("minimum_desired_capacity = " SIZE_FORMAT ", "
1487                  "maximum_desired_capacity = " SIZE_FORMAT,
1488                  minimum_desired_capacity, maximum_desired_capacity));
1489 
1490   // Should not be greater than the heap max size. No need to adjust
1491   // it with respect to the heap min size as it's a lower bound (i.e.,
1492   // we'll try to make the capacity larger than it, not smaller).
1493   minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
1494   // Should not be less than the heap min size. No need to adjust it
1495   // with respect to the heap max size as it's an upper bound (i.e.,
1496   // we'll try to make the capacity smaller than it, not greater).
1497   maximum_desired_capacity =  MAX2(maximum_desired_capacity, min_heap_size);
1498 
1499   if (capacity_after_gc < minimum_desired_capacity) {
1500     // Don't expand unless it's significant
1501     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
1502     ergo_verbose4(ErgoHeapSizing,
1503                   "attempt heap expansion",
1504                   ergo_format_reason("capacity lower than "
1505                                      "min desired capacity after Full GC")
1506                   ergo_format_byte("capacity")
1507                   ergo_format_byte("occupancy")


2126       assert(chr->is_continues_humongous(), "sanity");
2127       chr->reset_gc_time_stamp();
2128     }
2129   }
2130 }
2131 
2132 #ifndef PRODUCT
2133 
2134 class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
2135 private:
2136   unsigned _gc_time_stamp;
2137   bool _failures;
2138 
2139 public:
2140   CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
2141     _gc_time_stamp(gc_time_stamp), _failures(false) { }
2142 
2143   virtual bool doHeapRegion(HeapRegion* hr) {
2144     unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
2145     if (_gc_time_stamp != region_gc_time_stamp) {
2146       gclog_or_tty->print_cr("Region " HR_FORMAT " has GC time stamp = %d, "
2147                              "expected %d", HR_FORMAT_PARAMS(hr),
2148                              region_gc_time_stamp, _gc_time_stamp);
2149       _failures = true;
2150     }
2151     return false;
2152   }
2153 
2154   bool failures() { return _failures; }
2155 };
2156 
2157 void G1CollectedHeap::check_gc_time_stamps() {
2158   CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
2159   heap_region_iterate(&cl);
2160   guarantee(!cl.failures(), "all GC time stamps should have been reset");
2161 }
2162 #endif // PRODUCT
2163 
2164 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
2165                                                  DirtyCardQueue* into_cset_dcq,
2166                                                  bool concurrent,


2735 private:
2736   G1CollectedHeap* _g1h;
2737   VerifyOption     _vo;
2738   bool             _failures;
2739 public:
2740   // _vo == UsePrevMarking -> use "prev" marking information,
2741   // _vo == UseNextMarking -> use "next" marking information,
2742   // _vo == UseMarkWord    -> use mark word from object header.
2743   VerifyRootsClosure(VerifyOption vo) :
2744     _g1h(G1CollectedHeap::heap()),
2745     _vo(vo),
2746     _failures(false) { }
2747 
2748   bool failures() { return _failures; }
2749 
2750   template <class T> void do_oop_nv(T* p) {
2751     T heap_oop = oopDesc::load_heap_oop(p);
2752     if (!oopDesc::is_null(heap_oop)) {
2753       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2754       if (_g1h->is_obj_dead_cond(obj, _vo)) {
2755         gclog_or_tty->print_cr("Root location " PTR_FORMAT " "
2756                                "points to dead obj " PTR_FORMAT, p2i(p), p2i(obj));
2757         if (_vo == VerifyOption_G1UseMarkWord) {
2758           gclog_or_tty->print_cr("  Mark word: " INTPTR_FORMAT, (intptr_t)obj->mark());
2759         }
2760         obj->print_on(gclog_or_tty);
2761         _failures = true;
2762       }
2763     }
2764   }
2765 
2766   void do_oop(oop* p)       { do_oop_nv(p); }
2767   void do_oop(narrowOop* p) { do_oop_nv(p); }
2768 };
2769 
2770 class G1VerifyCodeRootOopClosure: public OopClosure {
2771   G1CollectedHeap* _g1h;
2772   OopClosure* _root_cl;
2773   nmethod* _nm;
2774   VerifyOption _vo;
2775   bool _failures;
2776 
2777   template <class T> void do_oop_work(T* p) {
2778     // First verify that this root is live


2785 
2786     // Don't check the code roots during marking verification in a full GC
2787     if (_vo == VerifyOption_G1UseMarkWord) {
2788       return;
2789     }
2790 
2791     // Now verify that the current nmethod (which contains p) is
2792     // in the code root list of the heap region containing the
2793     // object referenced by p.
2794 
2795     T heap_oop = oopDesc::load_heap_oop(p);
2796     if (!oopDesc::is_null(heap_oop)) {
2797       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2798 
2799       // Now fetch the region containing the object
2800       HeapRegion* hr = _g1h->heap_region_containing(obj);
2801       HeapRegionRemSet* hrrs = hr->rem_set();
2802       // Verify that the strong code root list for this region
2803       // contains the nmethod
2804       if (!hrrs->strong_code_roots_list_contains(_nm)) {
2805         gclog_or_tty->print_cr("Code root location " PTR_FORMAT " "
2806                                "from nmethod " PTR_FORMAT " not in strong "
2807                                "code roots for region [" PTR_FORMAT "," PTR_FORMAT ")",
2808                                p2i(p), p2i(_nm), p2i(hr->bottom()), p2i(hr->end()));
2809         _failures = true;
2810       }
2811     }
2812   }
2813 
2814 public:
2815   G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo):
2816     _g1h(g1h), _root_cl(root_cl), _vo(vo), _nm(NULL), _failures(false) {}
2817 
2818   void do_oop(oop* p) { do_oop_work(p); }
2819   void do_oop(narrowOop* p) { do_oop_work(p); }
2820 
2821   void set_nmethod(nmethod* nm) { _nm = nm; }
2822   bool failures() { return _failures; }
2823 };
2824 
2825 class G1VerifyCodeRootBlobClosure: public CodeBlobClosure {
2826   G1VerifyCodeRootOopClosure* _oop_cl;
2827 


2934   VerifyRegionClosure(bool par, VerifyOption vo)
2935     : _par(par),
2936       _vo(vo),
2937       _failures(false) {}
2938 
2939   bool failures() {
2940     return _failures;
2941   }
2942 
2943   bool doHeapRegion(HeapRegion* r) {
2944     if (!r->is_continues_humongous()) {
2945       bool failures = false;
2946       r->verify(_vo, &failures);
2947       if (failures) {
2948         _failures = true;
2949       } else {
2950         VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
2951         r->object_iterate(&not_dead_yet_cl);
2952         if (_vo != VerifyOption_G1UseNextMarking) {
2953           if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
2954             gclog_or_tty->print_cr("[" PTR_FORMAT "," PTR_FORMAT "] "
2955                                    "max_live_bytes " SIZE_FORMAT " "
2956                                    "< calculated " SIZE_FORMAT,
2957                                    p2i(r->bottom()), p2i(r->end()),
2958                                    r->max_live_bytes(),
2959                                  not_dead_yet_cl.live_bytes());
2960             _failures = true;
2961           }
2962         } else {
2963           // When vo == UseNextMarking we cannot currently do a sanity
2964           // check on the live bytes as the calculation has not been
2965           // finalized yet.
2966         }
2967       }
2968     }
2969     return false; // stop the region iteration if we hit a failure
2970   }
2971 };
2972 
2973 // This is the task used for parallel verification of the heap regions
2974 
2975 class G1ParVerifyTask: public AbstractGangTask {
2976 private:


3224   if (G1SummarizeConcMark) {
3225     concurrent_mark()->print_summary_info();
3226   }
3227   g1_policy()->print_yg_surv_rate_info();
3228 }
3229 
3230 #ifndef PRODUCT
3231 // Helpful for debugging RSet issues.
3232 
3233 class PrintRSetsClosure : public HeapRegionClosure {
3234 private:
3235   const char* _msg;
3236   size_t _occupied_sum;
3237 
3238 public:
3239   bool doHeapRegion(HeapRegion* r) {
3240     HeapRegionRemSet* hrrs = r->rem_set();
3241     size_t occupied = hrrs->occupied();
3242     _occupied_sum += occupied;
3243 
3244     gclog_or_tty->print_cr("Printing RSet for region " HR_FORMAT,
3245                            HR_FORMAT_PARAMS(r));
3246     if (occupied == 0) {
3247       gclog_or_tty->print_cr("  RSet is empty");
3248     } else {
3249       hrrs->print();
3250     }
3251     gclog_or_tty->print_cr("----------");
3252     return false;
3253   }
3254 
3255   PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
3256     gclog_or_tty->cr();
3257     gclog_or_tty->print_cr("========================================");
3258     gclog_or_tty->print_cr("%s", msg);
3259     gclog_or_tty->cr();
3260   }
3261 
3262   ~PrintRSetsClosure() {
3263     gclog_or_tty->print_cr("Occupied Sum: " SIZE_FORMAT, _occupied_sum);
3264     gclog_or_tty->print_cr("========================================");
3265     gclog_or_tty->cr();
3266   }
3267 };
3268 
3269 void G1CollectedHeap::print_cset_rsets() {
3270   PrintRSetsClosure cl("Printing CSet RSets");
3271   collection_set_iterate(&cl);
3272 }
3273 
3274 void G1CollectedHeap::print_all_rsets() {
3275   PrintRSetsClosure cl("Printing All RSets");;
3276   heap_region_iterate(&cl);
3277 }
3278 #endif // PRODUCT
3279 
3280 G1CollectedHeap* G1CollectedHeap::heap() {
3281   CollectedHeap* heap = Universe::heap();
3282   assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
3283   assert(heap->kind() == CollectedHeap::G1CollectedHeap, "Not a G1CollectedHeap");


4061 }
4062 
4063 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
4064   _evac_failure_scan_stack->push(obj);
4065 }
4066 
4067 void G1CollectedHeap::drain_evac_failure_scan_stack() {
4068   assert(_evac_failure_scan_stack != NULL, "precondition");
4069 
4070   while (_evac_failure_scan_stack->length() > 0) {
4071      oop obj = _evac_failure_scan_stack->pop();
4072      _evac_failure_closure->set_region(heap_region_containing(obj));
4073      obj->oop_iterate_backwards(_evac_failure_closure);
4074   }
4075 }
4076 
4077 oop
4078 G1CollectedHeap::handle_evacuation_failure_par(G1ParScanThreadState* _par_scan_state,
4079                                                oop old) {
4080   assert(obj_in_cs(old),
4081          err_msg("obj: " PTR_FORMAT " should still be in the CSet",
4082                  p2i(old)));
4083   markOop m = old->mark();
4084   oop forward_ptr = old->forward_to_atomic(old);
4085   if (forward_ptr == NULL) {
4086     // Forward-to-self succeeded.
4087     assert(_par_scan_state != NULL, "par scan state");
4088     OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
4089     uint queue_num = _par_scan_state->queue_num();
4090 
4091     _evacuation_failed = true;
4092     _evacuation_failed_info_array[queue_num].register_copy_failure(old->size());
4093     if (_evac_failure_closure != cl) {
4094       MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
4095       assert(!_drain_in_progress,
4096              "Should only be true while someone holds the lock.");
4097       // Set the global evac-failure closure to the current thread's.
4098       assert(_evac_failure_closure == NULL, "Or locking has failed.");
4099       set_evac_failure_closure(cl);
4100       // Now do the common part.
4101       handle_evacuation_failure_common(old, m);
4102       // Reset to NULL.
4103       set_evac_failure_closure(NULL);
4104     } else {
4105       // The lock is already held, and this is recursive.
4106       assert(_drain_in_progress, "This should only be the recursive case.");
4107       handle_evacuation_failure_common(old, m);
4108     }
4109     return old;
4110   } else {
4111     // Forward-to-self failed. Either someone else managed to allocate
4112     // space for this object (old != forward_ptr) or they beat us in
4113     // self-forwarding it (old == forward_ptr).
4114     assert(old == forward_ptr || !obj_in_cs(forward_ptr),
4115            err_msg("obj: " PTR_FORMAT " forwarded to: " PTR_FORMAT " "
4116                    "should not be in the CSet",
4117                    p2i(old), p2i(forward_ptr)));
4118     return forward_ptr;
4119   }
4120 }
4121 
4122 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
4123   preserve_mark_if_necessary(old, m);
4124 
4125   HeapRegion* r = heap_region_containing(old);
4126   if (!r->evacuation_failed()) {
4127     r->set_evacuation_failed(true);
4128     _hr_printer.evac_failure(r);
4129   }
4130 
4131   push_on_evac_failure_scan_stack(old);
4132 
4133   if (!_drain_in_progress) {
4134     // prevent recursion in copy_to_survivor_space()
4135     _drain_in_progress = true;


4488     _initial_string_table_size = StringTable::the_table()->table_size();
4489     _initial_symbol_table_size = SymbolTable::the_table()->table_size();
4490     if (process_strings) {
4491       StringTable::clear_parallel_claimed_index();
4492     }
4493     if (process_symbols) {
4494       SymbolTable::clear_parallel_claimed_index();
4495     }
4496   }
4497 
4498   ~G1StringSymbolTableUnlinkTask() {
4499     guarantee(!_process_strings || StringTable::parallel_claimed_index() >= _initial_string_table_size,
4500               err_msg("claim value %d after unlink less than initial string table size %d",
4501                       StringTable::parallel_claimed_index(), _initial_string_table_size));
4502     guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
4503               err_msg("claim value %d after unlink less than initial symbol table size %d",
4504                       SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));
4505 
4506     if (G1TraceStringSymbolTableScrubbing) {
4507       gclog_or_tty->print_cr("Cleaned string and symbol table, "
4508                              "strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, "
4509                              "symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",
4510                              strings_processed(), strings_removed(),
4511                              symbols_processed(), symbols_removed());
4512     }
4513   }
4514 
4515   void work(uint worker_id) {
4516     int strings_processed = 0;
4517     int strings_removed = 0;
4518     int symbols_processed = 0;
4519     int symbols_removed = 0;
4520     if (_process_strings) {
4521       StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
4522       Atomic::add(strings_processed, &_strings_processed);
4523       Atomic::add(strings_removed, &_strings_removed);
4524     }
4525     if (_process_symbols) {
4526       SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
4527       Atomic::add(symbols_processed, &_symbols_processed);
4528       Atomic::add(symbols_removed, &_symbols_removed);
4529     }


5612     ct_bs->verify_g1_young_region(mr);
5613   } else {
5614     ct_bs->verify_dirty_region(mr);
5615   }
5616 }
5617 
5618 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
5619   G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
5620   for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
5621     verify_dirty_region(hr);
5622   }
5623 }
5624 
5625 void G1CollectedHeap::verify_dirty_young_regions() {
5626   verify_dirty_young_list(_young_list->first_region());
5627 }
5628 
5629 bool G1CollectedHeap::verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
5630                                                HeapWord* tams, HeapWord* end) {
5631   guarantee(tams <= end,
5632             err_msg("tams: " PTR_FORMAT " end: " PTR_FORMAT, p2i(tams), p2i(end)));
5633   HeapWord* result = bitmap->getNextMarkedWordAddress(tams, end);
5634   if (result < end) {
5635     gclog_or_tty->cr();
5636     gclog_or_tty->print_cr("## wrong marked address on %s bitmap: " PTR_FORMAT,
5637                            bitmap_name, p2i(result));
5638     gclog_or_tty->print_cr("## %s tams: " PTR_FORMAT " end: " PTR_FORMAT,
5639                            bitmap_name, p2i(tams), p2i(end));
5640     return false;
5641   }
5642   return true;
5643 }
5644 
5645 bool G1CollectedHeap::verify_bitmaps(const char* caller, HeapRegion* hr) {
5646   CMBitMapRO* prev_bitmap = concurrent_mark()->prevMarkBitMap();
5647   CMBitMapRO* next_bitmap = (CMBitMapRO*) concurrent_mark()->nextMarkBitMap();
5648 
5649   HeapWord* bottom = hr->bottom();
5650   HeapWord* ptams  = hr->prev_top_at_mark_start();
5651   HeapWord* ntams  = hr->next_top_at_mark_start();
5652   HeapWord* end    = hr->end();
5653 
5654   bool res_p = verify_no_bits_over_tams("prev", prev_bitmap, ptams, end);
5655 
5656   bool res_n = true;
5657   // We reset mark_in_progress() before we reset _cmThread->in_progress(), and in this window
5658   // we do the clearing of the next bitmap concurrently. Thus, we cannot verify the bitmap
5659   // if we happen to be in that state.
5660   if (mark_in_progress() || !_cmThread->in_progress()) {
5661     res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
5662   }
5663   if (!res_p || !res_n) {
5664     gclog_or_tty->print_cr("#### Bitmap verification failed for " HR_FORMAT,
5665                            HR_FORMAT_PARAMS(hr));
5666     gclog_or_tty->print_cr("#### Caller: %s", caller);
5667     return false;
5668   }
5669   return true;
5670 }
5671 
5672 void G1CollectedHeap::check_bitmaps(const char* caller, HeapRegion* hr) {
5673   if (!G1VerifyBitmaps) return;
5674 
5675   guarantee(verify_bitmaps(caller, hr), "bitmap verification");
5676 }
5677 
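A minimal usage sketch for the check above; the label string and region variable are illustrative, not taken from this file:

  // Returns immediately unless the G1VerifyBitmaps develop flag is set.
  check_bitmaps("Mutator Region Allocation", new_alloc_region);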
5678 class G1VerifyBitmapClosure : public HeapRegionClosure {
5679 private:
5680   const char* _caller;
5681   G1CollectedHeap* _g1h;
5682   bool _failures;
5683 
5684 public:


5943     //
5944     // It is not required to check whether the object has been found dead by
5945     // marking; in fact, doing so would prevent reclamation within a concurrent
5946     // cycle, since all objects allocated during that time are considered live.
5947     // SATB marking is even more conservative than the remembered set.
5948     // So if at this point in the collection there is no remembered set entry,
5949     // nobody has a reference to it.
5950     // At the start of collection we flush all refinement logs, and remembered sets
5951     // are completely up-to-date wrt references to the humongous object.
5952     //
5953     // Other implementation considerations:
5954     // - never consider object arrays at this time because they would require
5955     // considerable effort to clean up the remembered sets. This is
5956     // required because stale remembered sets might reference locations that
5957     // are currently allocated into.
5958     uint region_idx = r->hrm_index();
5959     if (!g1h->is_humongous_reclaim_candidate(region_idx) ||
5960         !r->rem_set()->is_empty()) {
5961 
5962       if (G1TraceEagerReclaimHumongousObjects) {
5963         gclog_or_tty->print_cr("Live humongous region %u size " SIZE_FORMAT " start " PTR_FORMAT " length %u with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
5964                                region_idx,
5965                                (size_t)obj->size() * HeapWordSize,
5966                                p2i(r->bottom()),
5967                                r->region_num(),
5968                                r->rem_set()->occupied(),
5969                                r->rem_set()->strong_code_roots_list_length(),
5970                                next_bitmap->isMarked(r->bottom()),
5971                                g1h->is_humongous_reclaim_candidate(region_idx),
5972                                obj->is_typeArray()
5973                               );
5974       }
5975 
5976       return false;
5977     }
5978 
5979     guarantee(obj->is_typeArray(),
5980               err_msg("Only eagerly reclaiming type arrays is supported, but the object "
5981                       PTR_FORMAT " is not.",
5982                       p2i(r->bottom())));
5983 
5984     if (G1TraceEagerReclaimHumongousObjects) {
5985       gclog_or_tty->print_cr("Dead humongous region %u size " SIZE_FORMAT " start " PTR_FORMAT " length %u with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
5986                              region_idx,
5987                              (size_t)obj->size() * HeapWordSize,
5988                              p2i(r->bottom()),
5989                              r->region_num(),
5990                              r->rem_set()->occupied(),
5991                              r->rem_set()->strong_code_roots_list_length(),
5992                              next_bitmap->isMarked(r->bottom()),
5993                              g1h->is_humongous_reclaim_candidate(region_idx),
5994                              obj->is_typeArray()
5995                             );
5996     }
5997     // Clear the mark bit of the humongous object if it is already set.
5998     if (next_bitmap->isMarked(r->bottom())) {
5999       next_bitmap->clear(r->bottom());
6000     }
6001     _freed_bytes += r->used();
6002     r->set_containing_set(NULL);
6003     _humongous_regions_removed.increment(1u, r->capacity());
6004     g1h->free_humongous_region(r, _free_region_list, false);
6005 

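The reclaim test above compresses to the following predicate, sketched here for clarity (the helper name is hypothetical; the type-array property is guaranteed by candidate registration rather than re-tested):

  static bool is_eagerly_reclaimable(G1CollectedHeap* g1h, HeapRegion* r) {
    uint region_idx = r->hrm_index();
    return g1h->is_humongous_reclaim_candidate(region_idx) &&  // registered at pause start
           r->rem_set()->is_empty();                           // no known incoming references
  }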

6119     }
6120   }
6121 
6122   if (G1ConcRegionFreeingVerbose) {
6123     gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
6124                            "done waiting for free regions");
6125   }
6126 }
6127 
6128 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
6129   _young_list->push_region(hr);
6130 }
6131 
6132 class NoYoungRegionsClosure: public HeapRegionClosure {
6133 private:
6134   bool _success;
6135 public:
6136   NoYoungRegionsClosure() : _success(true) { }
6137   bool doHeapRegion(HeapRegion* r) {
6138     if (r->is_young()) {
6139       gclog_or_tty->print_cr("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
6140                              p2i(r->bottom()), p2i(r->end()));
6141       _success = false;
6142     }
6143     return false;
6144   }
6145   bool success() { return _success; }
6146 };
6147 
6148 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
6149   bool ret = _young_list->check_list_empty(check_sample);
6150 
6151   if (check_heap) {
6152     NoYoungRegionsClosure closure;
6153     heap_region_iterate(&closure);
6154     ret = ret && closure.success();
6155   }
6156 
6157   return ret;
6158 }
6159 
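A sketch of the assertion-style call sites this check is written for; the flag values and message are illustrative:

  assert(check_young_list_empty(true /* check_heap */, true /* check_sample */),
         "young list should be empty");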


6248   size_t total_used() {
6249     return _total_used;
6250   }
6251 };
6252 
6253 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
6254   assert_at_safepoint(true /* should_be_vm_thread */);
6255 
6256   if (!free_list_only) {
6257     _young_list->empty_list();
6258   }
6259 
6260   RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
6261   heap_region_iterate(&cl);
6262 
6263   if (!free_list_only) {
6264     _allocator->set_used(cl.total_used());
6265   }
6266   assert(_allocator->used_unlocked() == recalculate_used(),
6267          err_msg("inconsistent _allocator->used_unlocked(), "
6268                  "value: " SIZE_FORMAT " recalculated: " SIZE_FORMAT,
6269                  _allocator->used_unlocked(), recalculate_used()));
6270 }
6271 
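Sketched invocations for the two modes the flag distinguishes; the call sites are assumptions, as none appear in this excerpt:

  rebuild_region_sets(false /* free_list_only */); // e.g. after a full collection: rebuild all sets
  rebuild_region_sets(true /* free_list_only */);  // e.g. around a heap shrink: free list only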
6272 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
6273   _refine_cte_cl->set_concurrent(concurrent);
6274 }
6275 
6276 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
6277   HeapRegion* hr = heap_region_containing(p);
6278   return hr->is_in(p);
6279 }
6280 
6281 // Methods for the mutator alloc region
6282 
6283 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
6284                                                       bool force) {
6285   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6286   assert(!force || g1_policy()->can_expand_young_list(),
6287          "if force is true we should be able to expand the young list");
6288   bool young_list_full = g1_policy()->is_young_list_full();


6468   // Finally, make sure that the region accounting in the lists is
6469   // consistent with what we see in the heap.
6470 
6471   VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_hrm);
6472   heap_region_iterate(&cl);
6473   cl.verify_counts(&_old_set, &_humongous_set, &_hrm);
6474 }
6475 
6476 // Optimized nmethod scanning
6477 
6478 class RegisterNMethodOopClosure: public OopClosure {
6479   G1CollectedHeap* _g1h;
6480   nmethod* _nm;
6481 
6482   template <class T> void do_oop_work(T* p) {
6483     T heap_oop = oopDesc::load_heap_oop(p);
6484     if (!oopDesc::is_null(heap_oop)) {
6485       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
6486       HeapRegion* hr = _g1h->heap_region_containing(obj);
6487       assert(!hr->is_continues_humongous(),
6488              err_msg("trying to add code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
6489                      " starting at " HR_FORMAT,
6490                      p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
6491 
6492       // HeapRegion::add_strong_code_root_locked() avoids adding duplicate entries.
6493       hr->add_strong_code_root_locked(_nm);
6494     }
6495   }
6496 
6497 public:
6498   RegisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
6499     _g1h(g1h), _nm(nm) {}
6500 
6501   void do_oop(oop* p)       { do_oop_work(p); }
6502   void do_oop(narrowOop* p) { do_oop_work(p); }
6503 };
6504 
6505 class UnregisterNMethodOopClosure: public OopClosure {
6506   G1CollectedHeap* _g1h;
6507   nmethod* _nm;
6508 
6509   template <class T> void do_oop_work(T* p) {
6510     T heap_oop = oopDesc::load_heap_oop(p);
6511     if (!oopDesc::is_null(heap_oop)) {
6512       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
6513       HeapRegion* hr = _g1h->heap_region_containing(obj);
6514       assert(!hr->is_continues_humongous(),
6515              err_msg("trying to remove code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
6516                      " starting at " HR_FORMAT,
6517                      p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
6518 
6519       hr->remove_strong_code_root(_nm);
6520     }
6521   }
6522 
6523 public:
6524   UnregisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
6525     _g1h(g1h), _nm(nm) {}
6526 
6527   void do_oop(oop* p)       { do_oop_work(p); }
6528   void do_oop(narrowOop* p) { do_oop_work(p); }
6529 };
6530 
6531 void G1CollectedHeap::register_nmethod(nmethod* nm) {
6532   CollectedHeap::register_nmethod(nm);
6533 
6534   guarantee(nm != NULL, "sanity");
6535   RegisterNMethodOopClosure reg_cl(this, nm);
6536   nm->oops_do(&reg_cl);

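The unregister path presumably mirrors the registration above; a sketch of the elided remainder, under that assumption:

void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
  CollectedHeap::unregister_nmethod(nm);

  guarantee(nm != NULL, "sanity");
  UnregisterNMethodOopClosure reg_cl(this, nm);
  nm->oops_do(&reg_cl);
}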
