< prev index next >

src/share/vm/gc/g1/heapRegionRemSet.cpp

Print this page




  73   {}
  74 
  75   void add_card_work(CardIdx_t from_card, bool par) {
  76     if (!_bm.at(from_card)) {
  77       if (par) {
  78         if (_bm.par_at_put(from_card, 1)) {
  79           Atomic::inc(&_occupied);
  80         }
  81       } else {
  82         _bm.at_put(from_card, 1);
  83         _occupied++;
  84       }
  85     }
  86   }
  87 
  88   void add_reference_work(OopOrNarrowOopStar from, bool par) {
  89     // Must make this robust in case "from" is not in "_hr", because of
  90     // concurrency.
  91 
  92     if (G1TraceHeapRegionRememberedSet) {
  93       gclog_or_tty->print_cr("    PRT::Add_reference_work(" PTR_FORMAT "->" PTR_FORMAT").",
  94                              p2i(from),
  95                              UseCompressedOops
  96                              ? p2i(oopDesc::load_decode_heap_oop((narrowOop*)from))
  97                              : p2i(oopDesc::load_decode_heap_oop((oop*)from)));
  98     }
  99 
 100     HeapRegion* loc_hr = hr();
 101     // If the test below fails, then this table was reused concurrently
 102     // with this operation.  This is OK, since the old table was coarsened,
 103     // and adding a bit to the new table is never incorrect.
 104     // If the table used to belong to a continues humongous region and is
 105     // now reused for the corresponding start humongous region, we need to
 106     // make sure that we detect this. Thus, we call is_in_reserved_raw()
 107     // instead of just is_in_reserved() here.
 108     if (loc_hr->is_in_reserved_raw(from)) {
 109       size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
 110       CardIdx_t from_card = (CardIdx_t)
 111           hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);
 112 
 113       assert(0 <= from_card && (size_t)from_card < HeapRegion::CardsPerRegion,


 359          "just checking");
 360 }
 361 
// Static backing state for FromCardCache. _cache is allocated once in
// initialize() via create_unfreeable() and is never released; until then
// it is NULL and _max_regions/_static_mem_size are zero.
int**  FromCardCache::_cache = NULL;
uint   FromCardCache::_max_regions = 0;
size_t FromCardCache::_static_mem_size = 0;
 365 
 366 void FromCardCache::initialize(uint n_par_rs, uint max_num_regions) {
 367   guarantee(_cache == NULL, "Should not call this multiple times");
 368 
 369   _max_regions = max_num_regions;
 370   _cache = Padded2DArray<int, mtGC>::create_unfreeable(n_par_rs,
 371                                                        _max_regions,
 372                                                        &_static_mem_size);
 373 
 374   invalidate(0, _max_regions);
 375 }
 376 
 377 void FromCardCache::invalidate(uint start_idx, size_t new_num_regions) {
 378   guarantee((size_t)start_idx + new_num_regions <= max_uintx,
 379             err_msg("Trying to invalidate beyond maximum region, from %u size "SIZE_FORMAT,
 380                     start_idx, new_num_regions));
 381   for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
 382     uint end_idx = (start_idx + (uint)new_num_regions);
 383     assert(end_idx <= _max_regions, "Must be within max.");
 384     for (uint j = start_idx; j < end_idx; j++) {
 385       set(i, j, InvalidCard);
 386     }
 387   }
 388 }
 389 
 390 #ifndef PRODUCT
 391 void FromCardCache::print(outputStream* out) {
 392   for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
 393     for (uint j = 0; j < _max_regions; j++) {
 394       out->print_cr("_from_card_cache[%u][%u] = %d.",
 395                     i, j, at(i, j));
 396     }
 397   }
 398 }
 399 #endif


 613                  _n_coarse_entries);
 614     }
 615   }
 616 
 617   // Unsplice.
 618   *max_prev = max->collision_list_next();
 619   Atomic::inc(&_n_coarsenings);
 620   _n_fine_entries--;
 621   return max;
 622 }
 623 
 624 void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
 625                               BitMap* region_bm, BitMap* card_bm) {
 626   // First eliminate garbage regions from the coarse map.
 627   if (G1RSScrubVerbose) {
 628     gclog_or_tty->print_cr("Scrubbing region %u:", _hr->hrm_index());
 629   }
 630 
 631   assert(_coarse_map.size() == region_bm->size(), "Precondition");
 632   if (G1RSScrubVerbose) {
 633     gclog_or_tty->print("   Coarse map: before = "SIZE_FORMAT"...",
 634                         _n_coarse_entries);
 635   }
 636   _coarse_map.set_intersection(*region_bm);
 637   _n_coarse_entries = _coarse_map.count_one_bits();
 638   if (G1RSScrubVerbose) {
 639     gclog_or_tty->print_cr("   after = "SIZE_FORMAT".", _n_coarse_entries);
 640   }
 641 
 642   // Now do the fine-grained maps.
 643   for (size_t i = 0; i < _max_fine_entries; i++) {
 644     PerRegionTable* cur = _fine_grain_regions[i];
 645     PerRegionTable** prev = &_fine_grain_regions[i];
 646     while (cur != NULL) {
 647       PerRegionTable* nxt = cur->collision_list_next();
 648       // If the entire region is dead, eliminate.
 649       if (G1RSScrubVerbose) {
 650         gclog_or_tty->print_cr("     For other region %u:",
 651                                cur->hr()->hrm_index());
 652       }
 653       if (!region_bm->at((size_t) cur->hr()->hrm_index())) {
 654         *prev = nxt;
 655         cur->set_collision_list_next(NULL);
 656         _n_fine_entries--;
 657         if (G1RSScrubVerbose) {
 658           gclog_or_tty->print_cr("          deleted via region map.");
 659         }


 996 }
 997 
 998 bool HeapRegionRemSetIterator::fine_has_next(size_t& card_index) {
 999   if (fine_has_next()) {
1000     _cur_card_in_prt =
1001       _fine_cur_prt->_bm.get_next_one_offset(_cur_card_in_prt + 1);
1002   }
1003   if (_cur_card_in_prt == HeapRegion::CardsPerRegion) {
1004     // _fine_cur_prt may still be NULL in case if there are not PRTs at all for
1005     // the remembered set.
1006     if (_fine_cur_prt == NULL || _fine_cur_prt->next() == NULL) {
1007       return false;
1008     }
1009     PerRegionTable* next_prt = _fine_cur_prt->next();
1010     switch_to_prt(next_prt);
1011     _cur_card_in_prt = _fine_cur_prt->_bm.get_next_one_offset(_cur_card_in_prt + 1);
1012   }
1013 
1014   card_index = _cur_region_card_offset + _cur_card_in_prt;
1015   guarantee(_cur_card_in_prt < HeapRegion::CardsPerRegion,
1016             err_msg("Card index "SIZE_FORMAT" must be within the region", _cur_card_in_prt));
1017   return true;
1018 }
1019 
1020 bool HeapRegionRemSetIterator::fine_has_next() {
1021   return _cur_card_in_prt != HeapRegion::CardsPerRegion;
1022 }
1023 
1024 void HeapRegionRemSetIterator::switch_to_prt(PerRegionTable* prt) {
1025   assert(prt != NULL, "Cannot switch to NULL prt");
1026   _fine_cur_prt = prt;
1027 
1028   HeapWord* r_bot = _fine_cur_prt->hr()->bottom();
1029   _cur_region_card_offset = _bosa->index_for(r_bot);
1030 
1031   // The bitmap scan for the PRT always scans from _cur_region_cur_card + 1.
1032   // To avoid special-casing this start case, and not miss the first bitmap
1033   // entry, initialize _cur_region_cur_card with -1 instead of 0.
1034   _cur_card_in_prt = (size_t)-1;
1035 }
1036 


1165 
// Delegates cleanup preparation to the sparse remembered-set implementation.
void HeapRegionRemSet::reset_for_cleanup_tasks() {
  SparsePRT::reset_for_cleanup_tasks();
}
1169 
// Forwards the cleanup task to the tables owned by _other_regions.
void HeapRegionRemSet::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
  _other_regions.do_cleanup_work(hrrs_cleanup_task);
}
1173 
1174 void
1175 HeapRegionRemSet::finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task) {
1176   SparsePRT::finish_cleanup_task(hrrs_cleanup_task);
1177 }
1178 
1179 #ifndef PRODUCT
1180 void PerRegionTable::test_fl_mem_size() {
1181   PerRegionTable* dummy = alloc(NULL);
1182 
1183   size_t min_prt_size = sizeof(void*) + dummy->bm()->size_in_words() * HeapWordSize;
1184   assert(dummy->mem_size() > min_prt_size,
1185          err_msg("PerRegionTable memory usage is suspiciously small, only has "SIZE_FORMAT" bytes. "
1186                  "Should be at least "SIZE_FORMAT" bytes.", dummy->mem_size(), min_prt_size));
1187   free(dummy);
1188   guarantee(dummy->mem_size() == fl_mem_size(), "fl_mem_size() does not return the correct element size");
1189   // try to reset the state
1190   _free_list = NULL;
1191   delete dummy;
1192 }
1193 
// VM self-test entry point; exercises the PRT free-list size check.
void HeapRegionRemSet::test_prt() {
  PerRegionTable::test_fl_mem_size();
}
1197 
1198 void HeapRegionRemSet::test() {
1199   os::sleep(Thread::current(), (jlong)5000, false);
1200   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1201 
1202   // Run with "-XX:G1LogRSetRegionEntries=2", so that 1 and 5 end up in same
1203   // hash bucket.
1204   HeapRegion* hr0 = g1h->region_at(0);
1205   HeapRegion* hr1 = g1h->region_at(1);
1206   HeapRegion* hr2 = g1h->region_at(5);




  73   {}
  74 
  75   void add_card_work(CardIdx_t from_card, bool par) {
  76     if (!_bm.at(from_card)) {
  77       if (par) {
  78         if (_bm.par_at_put(from_card, 1)) {
  79           Atomic::inc(&_occupied);
  80         }
  81       } else {
  82         _bm.at_put(from_card, 1);
  83         _occupied++;
  84       }
  85     }
  86   }
  87 
  88   void add_reference_work(OopOrNarrowOopStar from, bool par) {
  89     // Must make this robust in case "from" is not in "_hr", because of
  90     // concurrency.
  91 
  92     if (G1TraceHeapRegionRememberedSet) {
  93       gclog_or_tty->print_cr("    PRT::Add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
  94                              p2i(from),
  95                              UseCompressedOops
  96                              ? p2i(oopDesc::load_decode_heap_oop((narrowOop*)from))
  97                              : p2i(oopDesc::load_decode_heap_oop((oop*)from)));
  98     }
  99 
 100     HeapRegion* loc_hr = hr();
 101     // If the test below fails, then this table was reused concurrently
 102     // with this operation.  This is OK, since the old table was coarsened,
 103     // and adding a bit to the new table is never incorrect.
 104     // If the table used to belong to a continues humongous region and is
 105     // now reused for the corresponding start humongous region, we need to
 106     // make sure that we detect this. Thus, we call is_in_reserved_raw()
 107     // instead of just is_in_reserved() here.
 108     if (loc_hr->is_in_reserved_raw(from)) {
 109       size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
 110       CardIdx_t from_card = (CardIdx_t)
 111           hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);
 112 
 113       assert(0 <= from_card && (size_t)from_card < HeapRegion::CardsPerRegion,


 359          "just checking");
 360 }
 361 
// Static backing state for FromCardCache. _cache is allocated once in
// initialize() via create_unfreeable() and is never released; until then
// it is NULL and _max_regions/_static_mem_size are zero.
int**  FromCardCache::_cache = NULL;
uint   FromCardCache::_max_regions = 0;
size_t FromCardCache::_static_mem_size = 0;
 365 
 366 void FromCardCache::initialize(uint n_par_rs, uint max_num_regions) {
 367   guarantee(_cache == NULL, "Should not call this multiple times");
 368 
 369   _max_regions = max_num_regions;
 370   _cache = Padded2DArray<int, mtGC>::create_unfreeable(n_par_rs,
 371                                                        _max_regions,
 372                                                        &_static_mem_size);
 373 
 374   invalidate(0, _max_regions);
 375 }
 376 
 377 void FromCardCache::invalidate(uint start_idx, size_t new_num_regions) {
 378   guarantee((size_t)start_idx + new_num_regions <= max_uintx,
 379             err_msg("Trying to invalidate beyond maximum region, from %u size " SIZE_FORMAT,
 380                     start_idx, new_num_regions));
 381   for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
 382     uint end_idx = (start_idx + (uint)new_num_regions);
 383     assert(end_idx <= _max_regions, "Must be within max.");
 384     for (uint j = start_idx; j < end_idx; j++) {
 385       set(i, j, InvalidCard);
 386     }
 387   }
 388 }
 389 
 390 #ifndef PRODUCT
 391 void FromCardCache::print(outputStream* out) {
 392   for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
 393     for (uint j = 0; j < _max_regions; j++) {
 394       out->print_cr("_from_card_cache[%u][%u] = %d.",
 395                     i, j, at(i, j));
 396     }
 397   }
 398 }
 399 #endif


 613                  _n_coarse_entries);
 614     }
 615   }
 616 
 617   // Unsplice.
 618   *max_prev = max->collision_list_next();
 619   Atomic::inc(&_n_coarsenings);
 620   _n_fine_entries--;
 621   return max;
 622 }
 623 
 624 void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
 625                               BitMap* region_bm, BitMap* card_bm) {
 626   // First eliminate garbage regions from the coarse map.
 627   if (G1RSScrubVerbose) {
 628     gclog_or_tty->print_cr("Scrubbing region %u:", _hr->hrm_index());
 629   }
 630 
 631   assert(_coarse_map.size() == region_bm->size(), "Precondition");
 632   if (G1RSScrubVerbose) {
 633     gclog_or_tty->print("   Coarse map: before = " SIZE_FORMAT "...",
 634                         _n_coarse_entries);
 635   }
 636   _coarse_map.set_intersection(*region_bm);
 637   _n_coarse_entries = _coarse_map.count_one_bits();
 638   if (G1RSScrubVerbose) {
 639     gclog_or_tty->print_cr("   after = " SIZE_FORMAT ".", _n_coarse_entries);
 640   }
 641 
 642   // Now do the fine-grained maps.
 643   for (size_t i = 0; i < _max_fine_entries; i++) {
 644     PerRegionTable* cur = _fine_grain_regions[i];
 645     PerRegionTable** prev = &_fine_grain_regions[i];
 646     while (cur != NULL) {
 647       PerRegionTable* nxt = cur->collision_list_next();
 648       // If the entire region is dead, eliminate.
 649       if (G1RSScrubVerbose) {
 650         gclog_or_tty->print_cr("     For other region %u:",
 651                                cur->hr()->hrm_index());
 652       }
 653       if (!region_bm->at((size_t) cur->hr()->hrm_index())) {
 654         *prev = nxt;
 655         cur->set_collision_list_next(NULL);
 656         _n_fine_entries--;
 657         if (G1RSScrubVerbose) {
 658           gclog_or_tty->print_cr("          deleted via region map.");
 659         }


 996 }
 997 
// Advances the iterator to the next set card in the fine-grain tables.
// On success stores the card index (region offset + in-PRT card) into
// "card_index" and returns true; returns false once every PRT is exhausted.
bool HeapRegionRemSetIterator::fine_has_next(size_t& card_index) {
  if (fine_has_next()) {
    // Scan the current PRT's bitmap for the next set bit.
    _cur_card_in_prt =
      _fine_cur_prt->_bm.get_next_one_offset(_cur_card_in_prt + 1);
  }
  if (_cur_card_in_prt == HeapRegion::CardsPerRegion) {
    // The current PRT is exhausted; try to move to the next one.
    // _fine_cur_prt may still be NULL in case there are no PRTs at all for
    // the remembered set.
    if (_fine_cur_prt == NULL || _fine_cur_prt->next() == NULL) {
      return false;
    }
    PerRegionTable* next_prt = _fine_cur_prt->next();
    switch_to_prt(next_prt);
    _cur_card_in_prt = _fine_cur_prt->_bm.get_next_one_offset(_cur_card_in_prt + 1);
  }

  // Combine the region's card offset with the in-PRT card number.
  card_index = _cur_region_card_offset + _cur_card_in_prt;
  guarantee(_cur_card_in_prt < HeapRegion::CardsPerRegion,
            err_msg("Card index " SIZE_FORMAT " must be within the region", _cur_card_in_prt));
  return true;
}
1019 
1020 bool HeapRegionRemSetIterator::fine_has_next() {
1021   return _cur_card_in_prt != HeapRegion::CardsPerRegion;
1022 }
1023 
// Makes "prt" the current fine-grain table and positions the card cursor
// just before that PRT's first card.
void HeapRegionRemSetIterator::switch_to_prt(PerRegionTable* prt) {
  assert(prt != NULL, "Cannot switch to NULL prt");
  _fine_cur_prt = prt;

  HeapWord* r_bot = _fine_cur_prt->hr()->bottom();
  // Remember where this PRT's region starts in the card numbering.
  _cur_region_card_offset = _bosa->index_for(r_bot);

  // The bitmap scan for the PRT always scans from _cur_region_cur_card + 1.
  // To avoid special-casing this start case, and not miss the first bitmap
  // entry, initialize _cur_region_cur_card with -1 instead of 0.
  _cur_card_in_prt = (size_t)-1;
}
1036 


1165 
// Delegates cleanup preparation to the sparse remembered-set implementation.
void HeapRegionRemSet::reset_for_cleanup_tasks() {
  SparsePRT::reset_for_cleanup_tasks();
}
1169 
// Forwards the cleanup task to the tables owned by _other_regions.
void HeapRegionRemSet::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
  _other_regions.do_cleanup_work(hrrs_cleanup_task);
}
1173 
// Hands the completed cleanup task back to the sparse table code.
void
HeapRegionRemSet::finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task) {
  SparsePRT::finish_cleanup_task(hrrs_cleanup_task);
}
1178 
#ifndef PRODUCT
// Self-test: checks that PerRegionTable::mem_size() is plausibly large and
// agrees with the free-list element size reported by fl_mem_size().
void PerRegionTable::test_fl_mem_size() {
  PerRegionTable* dummy = alloc(NULL);

  // A PRT must at least hold a next-pointer plus its card bitmap.
  size_t min_prt_size = sizeof(void*) + dummy->bm()->size_in_words() * HeapWordSize;
  assert(dummy->mem_size() > min_prt_size,
         err_msg("PerRegionTable memory usage is suspiciously small, only has " SIZE_FORMAT " bytes. "
                 "Should be at least " SIZE_FORMAT " bytes.", dummy->mem_size(), min_prt_size));
  // free() only links the PRT onto the free list, so "dummy" remains valid.
  free(dummy);
  guarantee(dummy->mem_size() == fl_mem_size(), "fl_mem_size() does not return the correct element size");
  // try to reset the state
  _free_list = NULL;
  delete dummy;
}
1193 
// VM self-test entry point; exercises the PRT free-list size check.
void HeapRegionRemSet::test_prt() {
  PerRegionTable::test_fl_mem_size();
}
1197 
1198 void HeapRegionRemSet::test() {
1199   os::sleep(Thread::current(), (jlong)5000, false);
1200   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1201 
1202   // Run with "-XX:G1LogRSetRegionEntries=2", so that 1 and 5 end up in same
1203   // hash bucket.
1204   HeapRegion* hr0 = g1h->region_at(0);
1205   HeapRegion* hr1 = g1h->region_at(1);
1206   HeapRegion* hr2 = g1h->region_at(5);


< prev index next >