src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp

rev 5920 : 8035406: Improve data structure for Code Cache remembered sets
Summary: Change the code cache remembered set data structure from a GrowableArray to a chunked list of nmethods. This makes the data structure more amenable to parallelization and reduces the time needed to free it.
Reviewed-by:
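
The core idea of the change is to replace the per-region GrowableArray of code roots with a linked list of fixed-size chunks of nmethod pointers, so that adding entries never copies the existing contents and whole lists can be released in one pass. The following is a minimal, self-contained sketch of that chunked-list idea only; the class and constant names (CodeRootChunk, CodeRootChunkedList, ChunkSize) are illustrative assumptions and are not the actual G1CodeRootSet classes introduced by this patch.

// Illustrative sketch of a chunked list of nmethod pointers.
// Simplified standalone code, not the HotSpot implementation.
#include <cstddef>

class nmethod; // opaque placeholder for HotSpot's nmethod

// Fixed-size chunk of nmethod pointers; chunks are linked into a list.
class CodeRootChunk {
public:
  static const size_t ChunkSize = 32; // assumed capacity per chunk

  CodeRootChunk() : _next(NULL), _top(0) { }

  bool is_full() const { return _top == ChunkSize; }

  // Append an nmethod; returns false if the chunk is already full.
  bool add(nmethod* nm) {
    if (is_full()) return false;
    _data[_top++] = nm;
    return true;
  }

  CodeRootChunk* next() const       { return _next; }
  void set_next(CodeRootChunk* n)   { _next = n; }

  size_t length() const             { return _top; }
  nmethod* at(size_t i) const       { return _data[i]; }

private:
  CodeRootChunk* _next;
  size_t _top;
  nmethod* _data[ChunkSize];
};

// Chunked list: adding never reallocates or copies existing entries, and the
// whole list can be freed (or handed back to a shared free list) in one pass.
class CodeRootChunkedList {
public:
  CodeRootChunkedList() : _head(NULL), _length(0) { }

  ~CodeRootChunkedList() {
    CodeRootChunk* cur = _head;
    while (cur != NULL) {
      CodeRootChunk* next = cur->next();
      delete cur; // real code would likely recycle chunks on a free list
      cur = next;
    }
  }

  void add(nmethod* nm) {
    if (_head == NULL || _head->is_full()) {
      // Start a new chunk at the head; older chunks are left untouched.
      CodeRootChunk* c = new CodeRootChunk();
      c->set_next(_head);
      _head = c;
    }
    _head->add(nm);
    _length++;
  }

  size_t length() const { return _length; }

private:
  CodeRootChunk* _head;
  size_t _length;
};

Because chunks are independent, separate lists (one per region) can be populated by different threads without contending on a single growing array, and clearing a remembered set degenerates to unlinking chunks rather than destroying a GrowableArray element by element.
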
   1 /*
   2  * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


 242   static size_t fl_mem_size() {
 243     PerRegionTable* cur = _free_list;
 244     size_t res = 0;
 245     while (cur != NULL) {
 246       res += cur->mem_size();
 247       cur = cur->next();
 248     }
 249     return res;
 250   }
 251 
 252   static void test_fl_mem_size();
 253 };
 254 
 255 PerRegionTable* PerRegionTable::_free_list = NULL;
 256 
 257 size_t OtherRegionsTable::_max_fine_entries = 0;
 258 size_t OtherRegionsTable::_mod_max_fine_entries_mask = 0;
 259 size_t OtherRegionsTable::_fine_eviction_stride = 0;
 260 size_t OtherRegionsTable::_fine_eviction_sample_size = 0;
 261 
 262 OtherRegionsTable::OtherRegionsTable(HeapRegion* hr) :
 263   _g1h(G1CollectedHeap::heap()),
 264   _m(Mutex::leaf, "An OtherRegionsTable lock", true),
 265   _hr(hr),
 266   _coarse_map(G1CollectedHeap::heap()->max_regions(),
 267               false /* in-resource-area */),
 268   _fine_grain_regions(NULL),
 269   _first_all_fine_prts(NULL), _last_all_fine_prts(NULL),
 270   _n_fine_entries(0), _n_coarse_entries(0),
 271   _fine_eviction_start(0),
 272   _sparse_table(hr)
 273 {
 274   typedef PerRegionTable* PerRegionTablePtr;
 275 
 276   if (_max_fine_entries == 0) {
 277     assert(_mod_max_fine_entries_mask == 0, "Both or none.");
 278     size_t max_entries_log = (size_t)log2_long((jlong)G1RSetRegionEntries);
 279     _max_fine_entries = (size_t)1 << max_entries_log;
 280     _mod_max_fine_entries_mask = _max_fine_entries - 1;
 281 
 282     assert(_fine_eviction_sample_size == 0
 283            && _fine_eviction_stride == 0, "All init at same time.");
 284     _fine_eviction_sample_size = MAX2((size_t)4, max_entries_log);
 285     _fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size;


 425     _from_card_cache[tid][cur_hrs_ind] = from_card;
 426   }
 427 
 428   // Note that this may be a continued H region.
 429   HeapRegion* from_hr = _g1h->heap_region_containing_raw(from);
 430   RegionIdx_t from_hrs_ind = (RegionIdx_t) from_hr->hrs_index();
 431 
 432   // If the region is already coarsened, return.
 433   if (_coarse_map.at(from_hrs_ind)) {
 434     if (G1TraceHeapRegionRememberedSet) {
 435       gclog_or_tty->print_cr("  coarse map hit.");
 436     }
 437     assert(contains_reference(from), "We just added it!");
 438     return;
 439   }
 440 
 441   // Otherwise find a per-region table to add it to.
 442   size_t ind = from_hrs_ind & _mod_max_fine_entries_mask;
 443   PerRegionTable* prt = find_region_table(ind, from_hr);
 444   if (prt == NULL) {
 445     MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
 446     // Confirm that it's really not there...
 447     prt = find_region_table(ind, from_hr);
 448     if (prt == NULL) {
 449 
 450       uintptr_t from_hr_bot_card_index =
 451         uintptr_t(from_hr->bottom())
 452           >> CardTableModRefBS::card_shift;
 453       CardIdx_t card_index = from_card - from_hr_bot_card_index;
 454       assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
 455              "Must be in range.");
 456       if (G1HRRSUseSparseTable &&
 457           _sparse_table.add_card(from_hrs_ind, card_index)) {
 458         if (G1RecordHRRSOops) {
 459           HeapRegionRemSet::record(hr(), from);
 460           if (G1TraceHeapRegionRememberedSet) {
 461             gclog_or_tty->print("   Added card " PTR_FORMAT " to region "
 462                                 "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
 463                                 align_size_down(uintptr_t(from),
 464                                                 CardTableModRefBS::card_size),
 465                                 hr()->bottom(), from);


 527                           hr()->bottom(), from);
 528     }
 529   }
 530   assert(contains_reference(from), "We just added it!");
 531 }
 532 
 533 PerRegionTable*
 534 OtherRegionsTable::find_region_table(size_t ind, HeapRegion* hr) const {
 535   assert(0 <= ind && ind < _max_fine_entries, "Preconditions.");
 536   PerRegionTable* prt = _fine_grain_regions[ind];
 537   while (prt != NULL && prt->hr() != hr) {
 538     prt = prt->collision_list_next();
 539   }
 540   // Loop postcondition is the method postcondition.
 541   return prt;
 542 }
 543 
 544 jint OtherRegionsTable::_n_coarsenings = 0;
 545 
 546 PerRegionTable* OtherRegionsTable::delete_region_table() {
 547   assert(_m.owned_by_self(), "Precondition");
 548   assert(_n_fine_entries == _max_fine_entries, "Precondition");
 549   PerRegionTable* max = NULL;
 550   jint max_occ = 0;
 551   PerRegionTable** max_prev;
 552   size_t max_ind;
 553 
 554   size_t i = _fine_eviction_start;
 555   for (size_t k = 0; k < _fine_eviction_sample_size; k++) {
 556     size_t ii = i;
 557     // Make sure we get a non-NULL sample.
 558     while (_fine_grain_regions[ii] == NULL) {
 559       ii++;
 560       if (ii == _max_fine_entries) ii = 0;
 561       guarantee(ii != i, "We must find one.");
 562     }
 563     PerRegionTable** prev = &_fine_grain_regions[ii];
 564     PerRegionTable* cur = *prev;
 565     while (cur != NULL) {
 566       jint cur_occ = cur->occupied();
 567       if (max == NULL || cur_occ > max_occ) {


 659         if (cur->occupied() == 0) {
 660           *prev = nxt;
 661           cur->set_collision_list_next(NULL);
 662           _n_fine_entries--;
 663           unlink_from_all(cur);
 664           PerRegionTable::free(cur);
 665         } else {
 666           prev = cur->collision_list_next_addr();
 667         }
 668       }
 669       cur = nxt;
 670     }
 671   }
 672   // Since we may have deleted a from_card_cache entry from the RS, clear
 673   // the FCC.
 674   clear_fcc();
 675 }
 676 
 677 
 678 size_t OtherRegionsTable::occupied() const {
 679   // Cast away const in this case.
 680   MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag);
 681   size_t sum = occ_fine();
 682   sum += occ_sparse();
 683   sum += occ_coarse();
 684   return sum;
 685 }
 686 
 687 size_t OtherRegionsTable::occ_fine() const {
 688   size_t sum = 0;
 689 
 690   size_t num = 0;
 691   PerRegionTable * cur = _first_all_fine_prts;
 692   while (cur != NULL) {
 693     sum += cur->occupied();
 694     cur = cur->next();
 695     num++;
 696   }
 697   guarantee(num == _n_fine_entries, "just checking");
 698   return sum;
 699 }
 700 
 701 size_t OtherRegionsTable::occ_coarse() const {
 702   return (_n_coarse_entries * HeapRegion::CardsPerRegion);
 703 }
 704 
 705 size_t OtherRegionsTable::occ_sparse() const {
 706   return _sparse_table.occupied();
 707 }
 708 
 709 size_t OtherRegionsTable::mem_size() const {
 710   // Cast away const in this case.
 711   MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag);
 712   size_t sum = 0;
 713   // all PRTs are of the same size so it is sufficient to query only one of them.
 714   if (_first_all_fine_prts != NULL) {
 715     assert(_last_all_fine_prts != NULL &&
 716       _first_all_fine_prts->mem_size() == _last_all_fine_prts->mem_size(), "check that mem_size() is constant");
 717     sum += _first_all_fine_prts->mem_size() * _n_fine_entries;
 718   }
 719   sum += (sizeof(PerRegionTable*) * _max_fine_entries);
 720   sum += (_coarse_map.size_in_words() * HeapWordSize);
 721   sum += (_sparse_table.mem_size());
 722   sum += sizeof(*this) - sizeof(_sparse_table); // Avoid double counting above.
 723   return sum;
 724 }
 725 
 726 size_t OtherRegionsTable::static_mem_size() {
 727   return _from_card_cache_mem_size;
 728 }
 729 
 730 size_t OtherRegionsTable::fl_mem_size() {
 731   return PerRegionTable::fl_mem_size();
 732 }
 733 
 734 void OtherRegionsTable::clear_fcc() {
 735   size_t hrs_idx = hr()->hrs_index();
 736   for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
 737     _from_card_cache[i][hrs_idx] = -1;
 738   }
 739 }
 740 
 741 void OtherRegionsTable::clear() {
 742   MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
 743   // if there are no entries, skip this step
 744   if (_first_all_fine_prts != NULL) {
 745     guarantee(_first_all_fine_prts != NULL && _last_all_fine_prts != NULL, "just checking");
 746     PerRegionTable::bulk_free(_first_all_fine_prts, _last_all_fine_prts);
 747     memset(_fine_grain_regions, 0, _max_fine_entries * sizeof(_fine_grain_regions[0]));
 748   } else {
 749     guarantee(_first_all_fine_prts == NULL && _last_all_fine_prts == NULL, "just checking");
 750   }
 751 
 752   _first_all_fine_prts = _last_all_fine_prts = NULL;
 753   _sparse_table.clear();
 754   _coarse_map.clear();
 755   _n_fine_entries = 0;
 756   _n_coarse_entries = 0;
 757 
 758   clear_fcc();
 759 }
 760 
 761 void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) {
 762   MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
 763   size_t hrs_ind = (size_t) from_hr->hrs_index();
 764   size_t ind = hrs_ind & _mod_max_fine_entries_mask;
 765   if (del_single_region_table(ind, from_hr)) {
 766     assert(!_coarse_map.at(hrs_ind), "Inv");
 767   } else {
 768     _coarse_map.par_at_put(hrs_ind, 0);
 769   }
 770   // Check to see if any of the fcc entries come from here.
 771   size_t hr_ind = (size_t) hr()->hrs_index();
 772   for (int tid = 0; tid < HeapRegionRemSet::num_par_rem_sets(); tid++) {
 773     int fcc_ent = _from_card_cache[tid][hr_ind];
 774     if (fcc_ent != -1) {
 775       HeapWord* card_addr = (HeapWord*)
 776         (uintptr_t(fcc_ent) << CardTableModRefBS::card_shift);
 777       if (hr()->is_in_reserved(card_addr)) {
 778         // Clear the from card cache.
 779         _from_card_cache[tid][hr_ind] = -1;
 780       }
 781     }
 782   }


 788   PerRegionTable** prev_addr = &_fine_grain_regions[ind];
 789   PerRegionTable* prt = *prev_addr;
 790   while (prt != NULL && prt->hr() != hr) {
 791     prev_addr = prt->collision_list_next_addr();
 792     prt = prt->collision_list_next();
 793   }
 794   if (prt != NULL) {
 795     assert(prt->hr() == hr, "Loop postcondition.");
 796     *prev_addr = prt->collision_list_next();
 797     unlink_from_all(prt);
 798     PerRegionTable::free(prt);
 799     _n_fine_entries--;
 800     return true;
 801   } else {
 802     return false;
 803   }
 804 }
 805 
 806 bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {
 807   // Cast away const in this case.
 808   MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag);
 809   return contains_reference_locked(from);
 810 }
 811 
 812 bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
 813   HeapRegion* hr = _g1h->heap_region_containing_raw(from);
 814   if (hr == NULL) return false;
 815   RegionIdx_t hr_ind = (RegionIdx_t) hr->hrs_index();
 816   // Is this region in the coarse map?
 817   if (_coarse_map.at(hr_ind)) return true;
 818 
 819   PerRegionTable* prt = find_region_table(hr_ind & _mod_max_fine_entries_mask,
 820                                      hr);
 821   if (prt != NULL) {
 822     return prt->contains_reference(from);
 823 
 824   } else {
 825     uintptr_t from_card =
 826       (uintptr_t(from) >> CardTableModRefBS::card_shift);
 827     uintptr_t hr_bot_card_index =
 828       uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;


 833     return _sparse_table.contains_card(hr_ind, card_index);
 834   }
 835 
 836 
 837 }
 838 
 839 void
 840 OtherRegionsTable::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
 841   _sparse_table.do_cleanup_work(hrrs_cleanup_task);
 842 }
 843 
 844 // Determines how many threads can add records to an rset in parallel.
 845 // This can be done by either mutator threads together with the
 846 // concurrent refinement threads or GC threads.
 847 int HeapRegionRemSet::num_par_rem_sets() {
 848   return (int)MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), ParallelGCThreads);
 849 }
 850 
 851 HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
 852                                    HeapRegion* hr)
 853   : _bosa(bosa), _strong_code_roots_list(NULL), _other_regions(hr) {

 854   reset_for_par_iteration();
 855 }
 856 
 857 void HeapRegionRemSet::setup_remset_size() {
 858   // Setup sparse and fine-grain tables sizes.
 859   // table_size = base * (log(region_size / 1M) + 1)
 860   const int LOG_M = 20;
 861   int region_size_log_mb = MAX2(HeapRegion::LogOfHRGrainBytes - LOG_M, 0);
 862   if (FLAG_IS_DEFAULT(G1RSetSparseRegionEntries)) {
 863     G1RSetSparseRegionEntries = G1RSetSparseRegionEntriesBase * (region_size_log_mb + 1);
 864   }
 865   if (FLAG_IS_DEFAULT(G1RSetRegionEntries)) {
 866     G1RSetRegionEntries = G1RSetRegionEntriesBase * (region_size_log_mb + 1);
 867   }
 868   guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0 , "Sanity");
 869 }
 870 
 871 bool HeapRegionRemSet::claim_iter() {
 872   if (_iter_state != Unclaimed) return false;
 873   jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_state), Unclaimed);
 874   return (res == Unclaimed);
 875 }
 876 
 877 void HeapRegionRemSet::set_iter_complete() {
 878   _iter_state = Complete;
 879 }
 880 
 881 bool HeapRegionRemSet::iter_is_complete() {
 882   return _iter_state == Complete;
 883 }
 884 
 885 #ifndef PRODUCT
 886 void HeapRegionRemSet::print() const {
 887   HeapRegionRemSetIterator iter(this);
 888   size_t card_index;
 889   while (iter.has_next(card_index)) {
 890     HeapWord* card_start =
 891       G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
 892     gclog_or_tty->print_cr("  Card " PTR_FORMAT, card_start);
 893   }
 894   if (iter.n_yielded() != occupied()) {
 895     gclog_or_tty->print_cr("Yielded disagrees with occupied:");
 896     gclog_or_tty->print_cr("  %6d yielded (%6d coarse, %6d fine).",
 897                   iter.n_yielded(),
 898                   iter.n_yielded_coarse(), iter.n_yielded_fine());
 899     gclog_or_tty->print_cr("  %6d occ     (%6d coarse, %6d fine).",
 900                   occupied(), occ_coarse(), occ_fine());
 901   }
 902   guarantee(iter.n_yielded() == occupied(),
 903             "We should have yielded all the represented cards.");
 904 }
 905 #endif
 906 
 907 void HeapRegionRemSet::cleanup() {
 908   SparsePRT::cleanup_all();
 909 }
 910 
 911 void HeapRegionRemSet::clear() {
 912   if (_strong_code_roots_list != NULL) {
 913     delete _strong_code_roots_list;
 914   }
 915   _strong_code_roots_list = new (ResourceObj::C_HEAP, mtGC)
 916                                 GrowableArray<nmethod*>(10, 0, NULL, true);
 917 


 918   _other_regions.clear();
 919   assert(occupied() == 0, "Should be clear.");
 920   reset_for_par_iteration();
 921 }
 922 
 923 void HeapRegionRemSet::reset_for_par_iteration() {
 924   _iter_state = Unclaimed;
 925   _iter_claimed = 0;
 926   // It's good to check this to make sure that the two methods are in sync.
 927   assert(verify_ready_for_par_iteration(), "post-condition");
 928 }
 929 
 930 void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
 931                              BitMap* region_bm, BitMap* card_bm) {
 932   _other_regions.scrub(ctbs, region_bm, card_bm);
 933 }
 934 
 935 
 936 // Code roots support
 937 
 938 void HeapRegionRemSet::add_strong_code_root(nmethod* nm) {
 939   assert(nm != NULL, "sanity");
 940   // Search for the code blob from the RHS to avoid
 941   // duplicate entries as much as possible
 942   if (_strong_code_roots_list->find_from_end(nm) < 0) {
 943     // Code blob isn't already in the list
 944     _strong_code_roots_list->push(nm);
 945   }
 946 }
 947 
 948 void HeapRegionRemSet::remove_strong_code_root(nmethod* nm) {
 949   assert(nm != NULL, "sanity");
 950   int idx = _strong_code_roots_list->find(nm);
 951   if (idx >= 0) {
 952     _strong_code_roots_list->remove_at(idx);
 953   }
 954   // Check that there were no duplicates
 955   guarantee(_strong_code_roots_list->find(nm) < 0, "duplicate entry found");
 956 }
 957 
 958 class NMethodMigrationOopClosure : public OopClosure {
 959   G1CollectedHeap* _g1h;
 960   HeapRegion* _from;
 961   nmethod* _nm;
 962 
 963   uint _num_self_forwarded;
 964 
 965   template <class T> void do_oop_work(T* p) {
 966     T heap_oop = oopDesc::load_heap_oop(p);
 967     if (!oopDesc::is_null(heap_oop)) {
 968       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
 969       if (_from->is_in(obj)) {
 970         // Reference still points into the source region.
 971         // Since roots are immediately evacuated this means that
 972         // we must have self forwarded the object
 973         assert(obj->is_forwarded(),
 974                err_msg("code roots should be immediately evacuated. "
 975                        "Ref: "PTR_FORMAT", "


 997     _g1h(g1h), _from(from), _nm(nm), _num_self_forwarded(0) {}
 998 
 999   void do_oop(narrowOop* p) { do_oop_work(p); }
1000   void do_oop(oop* p)       { do_oop_work(p); }
1001 
1002   uint retain() { return _num_self_forwarded > 0; }
1003 };
1004 
1005 void HeapRegionRemSet::migrate_strong_code_roots() {
1006   assert(hr()->in_collection_set(), "only collection set regions");
1007   assert(!hr()->isHumongous(),
1008          err_msg("humongous region "HR_FORMAT" should not have been added to the collection set",
1009                  HR_FORMAT_PARAMS(hr())));
1010 
1011   ResourceMark rm;
1012 
1013   // List of code blobs to retain for this region
1014   GrowableArray<nmethod*> to_be_retained(10);
1015   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1016 
1017   while (_strong_code_roots_list->is_nonempty()) {
1018     nmethod *nm = _strong_code_roots_list->pop();
1019     if (nm != NULL) {
1020       NMethodMigrationOopClosure oop_cl(g1h, hr(), nm);
1021       nm->oops_do(&oop_cl);
1022       if (oop_cl.retain()) {
1023         to_be_retained.push(nm);
1024       }
1025     }
1026   }
1027 
1028   // Now push any code roots we need to retain
1029   assert(to_be_retained.is_empty() || hr()->evacuation_failed(),
1030          "Retained nmethod list must be empty or "
1031          "evacuation of this region failed");
1032 
1033   while (to_be_retained.is_nonempty()) {
1034     nmethod* nm = to_be_retained.pop();
1035     assert(nm != NULL, "sanity");
1036     add_strong_code_root(nm);
1037   }
1038 }
1039 
1040 void HeapRegionRemSet::strong_code_roots_do(CodeBlobClosure* blk) const {
1041   for (int i = 0; i < _strong_code_roots_list->length(); i += 1) {
1042     nmethod* nm = _strong_code_roots_list->at(i);
1043     blk->do_code_blob(nm);
1044   }
1045 }
1046 
1047 size_t HeapRegionRemSet::strong_code_roots_mem_size() {
1048   return sizeof(GrowableArray<nmethod*>) +
1049          _strong_code_roots_list->max_length() * sizeof(nmethod*);
1050 }
1051 
1052 //-------------------- Iteration --------------------
1053 
1054 HeapRegionRemSetIterator:: HeapRegionRemSetIterator(const HeapRegionRemSet* hrrs) :
1055   _hrrs(hrrs),
1056   _g1h(G1CollectedHeap::heap()),
1057   _coarse_map(&hrrs->_other_regions._coarse_map),
1058   _fine_grain_regions(hrrs->_other_regions._fine_grain_regions),
1059   _bosa(hrrs->bosa()),
1060   _is(Sparse),
1061   // Set these values so that we increment to the first region.
1062   _coarse_cur_region_index(-1),
1063   _coarse_cur_region_cur_card(HeapRegion::CardsPerRegion-1),
1064   _cur_region_cur_card(0),
1065   _fine_array_index(-1),
1066   _fine_cur_prt(NULL),
1067   _n_yielded_coarse(0),
1068   _n_yielded_fine(0),
1069   _n_yielded_sparse(0),
1070   _sparse_iter(&hrrs->_other_regions._sparse_table) {}
1071 
1072 bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) {
1073   if (_hrrs->_other_regions._n_coarse_entries == 0) return false;
1074   // Go to the next card.


   1 /*
   2  * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


 242   static size_t fl_mem_size() {
 243     PerRegionTable* cur = _free_list;
 244     size_t res = 0;
 245     while (cur != NULL) {
 246       res += cur->mem_size();
 247       cur = cur->next();
 248     }
 249     return res;
 250   }
 251 
 252   static void test_fl_mem_size();
 253 };
 254 
 255 PerRegionTable* PerRegionTable::_free_list = NULL;
 256 
 257 size_t OtherRegionsTable::_max_fine_entries = 0;
 258 size_t OtherRegionsTable::_mod_max_fine_entries_mask = 0;
 259 size_t OtherRegionsTable::_fine_eviction_stride = 0;
 260 size_t OtherRegionsTable::_fine_eviction_sample_size = 0;
 261 
 262 OtherRegionsTable::OtherRegionsTable(HeapRegion* hr, Mutex* m) :
 263   _g1h(G1CollectedHeap::heap()),
 264   _hr(hr), _m(m),

 265   _coarse_map(G1CollectedHeap::heap()->max_regions(),
 266               false /* in-resource-area */),
 267   _fine_grain_regions(NULL),
 268   _first_all_fine_prts(NULL), _last_all_fine_prts(NULL),
 269   _n_fine_entries(0), _n_coarse_entries(0),
 270   _fine_eviction_start(0),
 271   _sparse_table(hr)
 272 {
 273   typedef PerRegionTable* PerRegionTablePtr;
 274 
 275   if (_max_fine_entries == 0) {
 276     assert(_mod_max_fine_entries_mask == 0, "Both or none.");
 277     size_t max_entries_log = (size_t)log2_long((jlong)G1RSetRegionEntries);
 278     _max_fine_entries = (size_t)1 << max_entries_log;
 279     _mod_max_fine_entries_mask = _max_fine_entries - 1;
 280 
 281     assert(_fine_eviction_sample_size == 0
 282            && _fine_eviction_stride == 0, "All init at same time.");
 283     _fine_eviction_sample_size = MAX2((size_t)4, max_entries_log);
 284     _fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size;


 424     _from_card_cache[tid][cur_hrs_ind] = from_card;
 425   }
 426 
 427   // Note that this may be a continued H region.
 428   HeapRegion* from_hr = _g1h->heap_region_containing_raw(from);
 429   RegionIdx_t from_hrs_ind = (RegionIdx_t) from_hr->hrs_index();
 430 
 431   // If the region is already coarsened, return.
 432   if (_coarse_map.at(from_hrs_ind)) {
 433     if (G1TraceHeapRegionRememberedSet) {
 434       gclog_or_tty->print_cr("  coarse map hit.");
 435     }
 436     assert(contains_reference(from), "We just added it!");
 437     return;
 438   }
 439 
 440   // Otherwise find a per-region table to add it to.
 441   size_t ind = from_hrs_ind & _mod_max_fine_entries_mask;
 442   PerRegionTable* prt = find_region_table(ind, from_hr);
 443   if (prt == NULL) {
 444     MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
 445     // Confirm that it's really not there...
 446     prt = find_region_table(ind, from_hr);
 447     if (prt == NULL) {
 448 
 449       uintptr_t from_hr_bot_card_index =
 450         uintptr_t(from_hr->bottom())
 451           >> CardTableModRefBS::card_shift;
 452       CardIdx_t card_index = from_card - from_hr_bot_card_index;
 453       assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
 454              "Must be in range.");
 455       if (G1HRRSUseSparseTable &&
 456           _sparse_table.add_card(from_hrs_ind, card_index)) {
 457         if (G1RecordHRRSOops) {
 458           HeapRegionRemSet::record(hr(), from);
 459           if (G1TraceHeapRegionRememberedSet) {
 460             gclog_or_tty->print("   Added card " PTR_FORMAT " to region "
 461                                 "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
 462                                 align_size_down(uintptr_t(from),
 463                                                 CardTableModRefBS::card_size),
 464                                 hr()->bottom(), from);


 526                           hr()->bottom(), from);
 527     }
 528   }
 529   assert(contains_reference(from), "We just added it!");
 530 }
 531 
 532 PerRegionTable*
 533 OtherRegionsTable::find_region_table(size_t ind, HeapRegion* hr) const {
 534   assert(0 <= ind && ind < _max_fine_entries, "Preconditions.");
 535   PerRegionTable* prt = _fine_grain_regions[ind];
 536   while (prt != NULL && prt->hr() != hr) {
 537     prt = prt->collision_list_next();
 538   }
 539   // Loop postcondition is the method postcondition.
 540   return prt;
 541 }
 542 
 543 jint OtherRegionsTable::_n_coarsenings = 0;
 544 
 545 PerRegionTable* OtherRegionsTable::delete_region_table() {
 546   assert(_m->owned_by_self(), "Precondition");
 547   assert(_n_fine_entries == _max_fine_entries, "Precondition");
 548   PerRegionTable* max = NULL;
 549   jint max_occ = 0;
 550   PerRegionTable** max_prev;
 551   size_t max_ind;
 552 
 553   size_t i = _fine_eviction_start;
 554   for (size_t k = 0; k < _fine_eviction_sample_size; k++) {
 555     size_t ii = i;
 556     // Make sure we get a non-NULL sample.
 557     while (_fine_grain_regions[ii] == NULL) {
 558       ii++;
 559       if (ii == _max_fine_entries) ii = 0;
 560       guarantee(ii != i, "We must find one.");
 561     }
 562     PerRegionTable** prev = &_fine_grain_regions[ii];
 563     PerRegionTable* cur = *prev;
 564     while (cur != NULL) {
 565       jint cur_occ = cur->occupied();
 566       if (max == NULL || cur_occ > max_occ) {


 658         if (cur->occupied() == 0) {
 659           *prev = nxt;
 660           cur->set_collision_list_next(NULL);
 661           _n_fine_entries--;
 662           unlink_from_all(cur);
 663           PerRegionTable::free(cur);
 664         } else {
 665           prev = cur->collision_list_next_addr();
 666         }
 667       }
 668       cur = nxt;
 669     }
 670   }
 671   // Since we may have deleted a from_card_cache entry from the RS, clear
 672   // the FCC.
 673   clear_fcc();
 674 }
 675 
 676 
 677 size_t OtherRegionsTable::occupied() const {


 678   size_t sum = occ_fine();
 679   sum += occ_sparse();
 680   sum += occ_coarse();
 681   return sum;
 682 }
 683 
 684 size_t OtherRegionsTable::occ_fine() const {
 685   size_t sum = 0;
 686 
 687   size_t num = 0;
 688   PerRegionTable * cur = _first_all_fine_prts;
 689   while (cur != NULL) {
 690     sum += cur->occupied();
 691     cur = cur->next();
 692     num++;
 693   }
 694   guarantee(num == _n_fine_entries, "just checking");
 695   return sum;
 696 }
 697 
 698 size_t OtherRegionsTable::occ_coarse() const {
 699   return (_n_coarse_entries * HeapRegion::CardsPerRegion);
 700 }
 701 
 702 size_t OtherRegionsTable::occ_sparse() const {
 703   return _sparse_table.occupied();
 704 }
 705 
 706 size_t OtherRegionsTable::mem_size() const {


 707   size_t sum = 0;
 708   // all PRTs are of the same size so it is sufficient to query only one of them.
 709   if (_first_all_fine_prts != NULL) {
 710     assert(_last_all_fine_prts != NULL &&
 711       _first_all_fine_prts->mem_size() == _last_all_fine_prts->mem_size(), "check that mem_size() is constant");
 712     sum += _first_all_fine_prts->mem_size() * _n_fine_entries;
 713   }
 714   sum += (sizeof(PerRegionTable*) * _max_fine_entries);
 715   sum += (_coarse_map.size_in_words() * HeapWordSize);
 716   sum += (_sparse_table.mem_size());
 717   sum += sizeof(*this) - sizeof(_sparse_table); // Avoid double counting above.
 718   return sum;
 719 }
 720 
 721 size_t OtherRegionsTable::static_mem_size() {
 722   return _from_card_cache_mem_size;
 723 }
 724 
 725 size_t OtherRegionsTable::fl_mem_size() {
 726   return PerRegionTable::fl_mem_size();
 727 }
 728 
 729 void OtherRegionsTable::clear_fcc() {
 730   size_t hrs_idx = hr()->hrs_index();
 731   for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
 732     _from_card_cache[i][hrs_idx] = -1;
 733   }
 734 }
 735 
 736 void OtherRegionsTable::clear() {

 737   // if there are no entries, skip this step
 738   if (_first_all_fine_prts != NULL) {
 739     guarantee(_first_all_fine_prts != NULL && _last_all_fine_prts != NULL, "just checking");
 740     PerRegionTable::bulk_free(_first_all_fine_prts, _last_all_fine_prts);
 741     memset(_fine_grain_regions, 0, _max_fine_entries * sizeof(_fine_grain_regions[0]));
 742   } else {
 743     guarantee(_first_all_fine_prts == NULL && _last_all_fine_prts == NULL, "just checking");
 744   }
 745 
 746   _first_all_fine_prts = _last_all_fine_prts = NULL;
 747   _sparse_table.clear();
 748   _coarse_map.clear();
 749   _n_fine_entries = 0;
 750   _n_coarse_entries = 0;
 751 
 752   clear_fcc();
 753 }
 754 
 755 void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) {
 756   MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
 757   size_t hrs_ind = (size_t) from_hr->hrs_index();
 758   size_t ind = hrs_ind & _mod_max_fine_entries_mask;
 759   if (del_single_region_table(ind, from_hr)) {
 760     assert(!_coarse_map.at(hrs_ind), "Inv");
 761   } else {
 762     _coarse_map.par_at_put(hrs_ind, 0);
 763   }
 764   // Check to see if any of the fcc entries come from here.
 765   size_t hr_ind = (size_t) hr()->hrs_index();
 766   for (int tid = 0; tid < HeapRegionRemSet::num_par_rem_sets(); tid++) {
 767     int fcc_ent = _from_card_cache[tid][hr_ind];
 768     if (fcc_ent != -1) {
 769       HeapWord* card_addr = (HeapWord*)
 770         (uintptr_t(fcc_ent) << CardTableModRefBS::card_shift);
 771       if (hr()->is_in_reserved(card_addr)) {
 772         // Clear the from card cache.
 773         _from_card_cache[tid][hr_ind] = -1;
 774       }
 775     }
 776   }


 782   PerRegionTable** prev_addr = &_fine_grain_regions[ind];
 783   PerRegionTable* prt = *prev_addr;
 784   while (prt != NULL && prt->hr() != hr) {
 785     prev_addr = prt->collision_list_next_addr();
 786     prt = prt->collision_list_next();
 787   }
 788   if (prt != NULL) {
 789     assert(prt->hr() == hr, "Loop postcondition.");
 790     *prev_addr = prt->collision_list_next();
 791     unlink_from_all(prt);
 792     PerRegionTable::free(prt);
 793     _n_fine_entries--;
 794     return true;
 795   } else {
 796     return false;
 797   }
 798 }
 799 
 800 bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {
 801   // Cast away const in this case.
 802   MutexLockerEx x((Mutex*)_m, Mutex::_no_safepoint_check_flag);
 803   return contains_reference_locked(from);
 804 }
 805 
 806 bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
 807   HeapRegion* hr = _g1h->heap_region_containing_raw(from);
 808   if (hr == NULL) return false;
 809   RegionIdx_t hr_ind = (RegionIdx_t) hr->hrs_index();
 810   // Is this region in the coarse map?
 811   if (_coarse_map.at(hr_ind)) return true;
 812 
 813   PerRegionTable* prt = find_region_table(hr_ind & _mod_max_fine_entries_mask,
 814                                      hr);
 815   if (prt != NULL) {
 816     return prt->contains_reference(from);
 817 
 818   } else {
 819     uintptr_t from_card =
 820       (uintptr_t(from) >> CardTableModRefBS::card_shift);
 821     uintptr_t hr_bot_card_index =
 822       uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;


 827     return _sparse_table.contains_card(hr_ind, card_index);
 828   }
 829 
 830 
 831 }
 832 
 833 void
 834 OtherRegionsTable::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
 835   _sparse_table.do_cleanup_work(hrrs_cleanup_task);
 836 }
 837 
 838 // Determines how many threads can add records to an rset in parallel.
 839 // This can be done by either mutator threads together with the
 840 // concurrent refinement threads or GC threads.
 841 int HeapRegionRemSet::num_par_rem_sets() {
 842   return (int)MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), ParallelGCThreads);
 843 }
 844 
 845 HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
 846                                    HeapRegion* hr)
 847   : _bosa(bosa), _m(Mutex::leaf, "An OtherRegionsTable lock", true),
 848     _code_roots(), _other_regions(hr, &_m) {
 849   reset_for_par_iteration();
 850 }
 851 
 852 void HeapRegionRemSet::setup_remset_size() {
 853   // Setup sparse and fine-grain tables sizes.
 854   // table_size = base * (log(region_size / 1M) + 1)
 855   const int LOG_M = 20;
 856   int region_size_log_mb = MAX2(HeapRegion::LogOfHRGrainBytes - LOG_M, 0);
 857   if (FLAG_IS_DEFAULT(G1RSetSparseRegionEntries)) {
 858     G1RSetSparseRegionEntries = G1RSetSparseRegionEntriesBase * (region_size_log_mb + 1);
 859   }
 860   if (FLAG_IS_DEFAULT(G1RSetRegionEntries)) {
 861     G1RSetRegionEntries = G1RSetRegionEntriesBase * (region_size_log_mb + 1);
 862   }
 863   guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0 , "Sanity");
 864 }
 865 
 866 bool HeapRegionRemSet::claim_iter() {
 867   if (_iter_state != Unclaimed) return false;
 868   jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_state), Unclaimed);
 869   return (res == Unclaimed);
 870 }
 871 
 872 void HeapRegionRemSet::set_iter_complete() {
 873   _iter_state = Complete;
 874 }
 875 
 876 bool HeapRegionRemSet::iter_is_complete() {
 877   return _iter_state == Complete;
 878 }
 879 
 880 #ifndef PRODUCT
 881 void HeapRegionRemSet::print() {
 882   HeapRegionRemSetIterator iter(this);
 883   size_t card_index;
 884   while (iter.has_next(card_index)) {
 885     HeapWord* card_start =
 886       G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
 887     gclog_or_tty->print_cr("  Card " PTR_FORMAT, card_start);
 888   }
 889   if (iter.n_yielded() != occupied()) {
 890     gclog_or_tty->print_cr("Yielded disagrees with occupied:");
 891     gclog_or_tty->print_cr("  %6d yielded (%6d coarse, %6d fine).",
 892                   iter.n_yielded(),
 893                   iter.n_yielded_coarse(), iter.n_yielded_fine());
 894     gclog_or_tty->print_cr("  %6d occ     (%6d coarse, %6d fine).",
 895                   occupied(), occ_coarse(), occ_fine());
 896   }
 897   guarantee(iter.n_yielded() == occupied(),
 898             "We should have yielded all the represented cards.");
 899 }
 900 #endif
 901 
 902 void HeapRegionRemSet::cleanup() {
 903   SparsePRT::cleanup_all();
 904 }
 905 
 906 void HeapRegionRemSet::clear() {
 907   MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
 908   clear_locked();
 909 }


 910 
 911 void HeapRegionRemSet::clear_locked() {
 912   _code_roots.clear();
 913   _other_regions.clear();
 914   assert(occupied_locked() == 0, "Should be clear.");
 915   reset_for_par_iteration();
 916 }
 917 
 918 void HeapRegionRemSet::reset_for_par_iteration() {
 919   _iter_state = Unclaimed;
 920   _iter_claimed = 0;
 921   // It's good to check this to make sure that the two methods are in sync.
 922   assert(verify_ready_for_par_iteration(), "post-condition");
 923 }
 924 
 925 void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
 926                              BitMap* region_bm, BitMap* card_bm) {
 927   _other_regions.scrub(ctbs, region_bm, card_bm);
 928 }
 929 
 930 
 931 // Code roots support
 932 
 933 void HeapRegionRemSet::add_strong_code_root(nmethod* nm) {
 934   assert(nm != NULL, "sanity");
 935   _code_roots.add(nm);





 936 }
 937 
 938 void HeapRegionRemSet::remove_strong_code_root(nmethod* nm) {
 939   assert(nm != NULL, "sanity");
 940   _code_roots.remove(nm);



 941   // Check that there were no duplicates
 942   guarantee(!_code_roots.contains(nm), "duplicate entry found");
 943 }
 944 
 945 class NMethodMigrationOopClosure : public OopClosure {
 946   G1CollectedHeap* _g1h;
 947   HeapRegion* _from;
 948   nmethod* _nm;
 949 
 950   uint _num_self_forwarded;
 951 
 952   template <class T> void do_oop_work(T* p) {
 953     T heap_oop = oopDesc::load_heap_oop(p);
 954     if (!oopDesc::is_null(heap_oop)) {
 955       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
 956       if (_from->is_in(obj)) {
 957         // Reference still points into the source region.
 958         // Since roots are immediately evacuated this means that
 959         // we must have self forwarded the object
 960         assert(obj->is_forwarded(),
 961                err_msg("code roots should be immediately evacuated. "
 962                        "Ref: "PTR_FORMAT", "


 984     _g1h(g1h), _from(from), _nm(nm), _num_self_forwarded(0) {}
 985 
 986   void do_oop(narrowOop* p) { do_oop_work(p); }
 987   void do_oop(oop* p)       { do_oop_work(p); }
 988 
 989   uint retain() { return _num_self_forwarded > 0; }
 990 };
 991 
 992 void HeapRegionRemSet::migrate_strong_code_roots() {
 993   assert(hr()->in_collection_set(), "only collection set regions");
 994   assert(!hr()->isHumongous(),
 995          err_msg("humongous region "HR_FORMAT" should not have been added to the collection set",
 996                  HR_FORMAT_PARAMS(hr())));
 997 
 998   ResourceMark rm;
 999 
1000   // List of code blobs to retain for this region
1001   GrowableArray<nmethod*> to_be_retained(10);
1002   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1003 
1004   while (!_code_roots.is_empty()) {
1005     nmethod *nm = _code_roots.pop();
1006     if (nm != NULL) {
1007       NMethodMigrationOopClosure oop_cl(g1h, hr(), nm);
1008       nm->oops_do(&oop_cl);
1009       if (oop_cl.retain()) {
1010         to_be_retained.push(nm);
1011       }
1012     }
1013   }
1014 
1015   // Now push any code roots we need to retain
1016   assert(to_be_retained.is_empty() || hr()->evacuation_failed(),
1017          "Retained nmethod list must be empty or "
1018          "evacuation of this region failed");
1019 
1020   while (to_be_retained.is_nonempty()) {
1021     nmethod* nm = to_be_retained.pop();
1022     assert(nm != NULL, "sanity");
1023     add_strong_code_root(nm);
1024   }
1025 }
1026 
1027 void HeapRegionRemSet::strong_code_roots_do(CodeBlobClosure* blk) const {
1028   _code_roots.nmethods_do(blk);



1029 }
1030 
1031 size_t HeapRegionRemSet::strong_code_roots_mem_size() {
1032   return _code_roots.mem_size();

1033 }
1034 
1035 //-------------------- Iteration --------------------
1036 
1037 HeapRegionRemSetIterator:: HeapRegionRemSetIterator(HeapRegionRemSet* hrrs) :
1038   _hrrs(hrrs),
1039   _g1h(G1CollectedHeap::heap()),
1040   _coarse_map(&hrrs->_other_regions._coarse_map),
1041   _fine_grain_regions(hrrs->_other_regions._fine_grain_regions),
1042   _bosa(hrrs->bosa()),
1043   _is(Sparse),
1044   // Set these values so that we increment to the first region.
1045   _coarse_cur_region_index(-1),
1046   _coarse_cur_region_cur_card(HeapRegion::CardsPerRegion-1),
1047   _cur_region_cur_card(0),
1048   _fine_array_index(-1),
1049   _fine_cur_prt(NULL),
1050   _n_yielded_coarse(0),
1051   _n_yielded_fine(0),
1052   _n_yielded_sparse(0),
1053   _sparse_iter(&hrrs->_other_regions._sparse_table) {}
1054 
1055 bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) {
1056   if (_hrrs->_other_regions._n_coarse_entries == 0) return false;
1057   // Go to the next card.