src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp

Print this page
rev 4803 : imported patch thomas-comments-2


  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/g1/concurrentG1Refine.hpp"
  27 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
  28 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  29 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  30 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
  31 #include "memory/allocation.hpp"
  32 #include "memory/space.inline.hpp"
  33 #include "oops/oop.inline.hpp"
  34 #include "utilities/bitMap.inline.hpp"
  35 #include "utilities/globalDefinitions.hpp"

  36 
  37 class PerRegionTable: public CHeapObj<mtGC> {
  38   friend class OtherRegionsTable;
  39   friend class HeapRegionRemSetIterator;
  40 
  41   HeapRegion*     _hr;
  42   BitMap          _bm;
  43   jint            _occupied;
  44 
  45   // next pointer for free/allocated 'all' list
  46   PerRegionTable* _next;
  47 
  48   // prev pointer for the allocated 'all' list
  49   PerRegionTable* _prev;
  50 
  51   // next pointer in collision list
  52   PerRegionTable * _collision_list_next;
  53 
  54   // Global free list of PRTs
  55   static PerRegionTable* _free_list;


 832     return _sparse_table.contains_card(hr_ind, card_index);
 833   }
 834 
 835 
 836 }
 837 
 // Forward an HRRS cleanup task to the sparse table component; the
 // coarse and fine-grain structures take no part in this cleanup here.
 838 void
 839 OtherRegionsTable::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
 840   _sparse_table.do_cleanup_work(hrrs_cleanup_task);
 841 }
 842 
 843 // Determines how many threads can add records to an rset in parallel.
 844 // This can be done by either mutator threads together with the
 845 // concurrent refinement threads or GC threads.
 846 int HeapRegionRemSet::num_par_rem_sets() {
   // Take the larger of the two possible writer populations:
   // (mutator dirty-card queue ids + refinement threads) vs. parallel GC threads.
 847   return (int)MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), ParallelGCThreads);
 848 }
 849 
 // Construct an empty remembered set for region hr. The block offset
 // shared array is cached; the iterator reads it via bosa().
 850 HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
 851                                    HeapRegion* hr)
 852   : _bosa(bosa), _other_regions(hr) {
 853   reset_for_par_iteration();
 854 }
 855 
 856 void HeapRegionRemSet::setup_remset_size() {
 857   // Setup sparse and fine-grain tables sizes.
 858   // table_size = base * (log(region_size / 1M) + 1)
   // LOG_M is log2 of 1M; region_size_log_mb is how many powers of two
   // the region size lies above 1M, clamped at 0 for regions <= 1M.
 859   const int LOG_M = 20;
 860   int region_size_log_mb = MAX2(HeapRegion::LogOfHRGrainBytes - LOG_M, 0);
   // Only scale flags still at their defaults, so explicit command-line
   // settings are respected.
 861   if (FLAG_IS_DEFAULT(G1RSetSparseRegionEntries)) {
 862     G1RSetSparseRegionEntries = G1RSetSparseRegionEntriesBase * (region_size_log_mb + 1);
 863   }
 864   if (FLAG_IS_DEFAULT(G1RSetRegionEntries)) {
 865     G1RSetRegionEntries = G1RSetRegionEntriesBase * (region_size_log_mb + 1);
 866   }
 867   guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0 , "Sanity");
 868 }
 869 
 870 bool HeapRegionRemSet::claim_iter() {
 871   if (_iter_state != Unclaimed) return false;
 872   jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_state), Unclaimed);


 891     gclog_or_tty->print_cr("  Card " PTR_FORMAT, card_start);
 892   }
 893   if (iter.n_yielded() != occupied()) {
 894     gclog_or_tty->print_cr("Yielded disagrees with occupied:");
 895     gclog_or_tty->print_cr("  %6d yielded (%6d coarse, %6d fine).",
 896                   iter.n_yielded(),
 897                   iter.n_yielded_coarse(), iter.n_yielded_fine());
 898     gclog_or_tty->print_cr("  %6d occ     (%6d coarse, %6d fine).",
 899                   occupied(), occ_coarse(), occ_fine());
 900   }
 901   guarantee(iter.n_yielded() == occupied(),
 902             "We should have yielded all the represented cards.");
 903 }
 904 #endif
 905 
 // Global (static) cleanup hook: delegates to SparsePRT::cleanup_all()
 // — presumably reclaims retired sparse-table storage; see SparsePRT.
 906 void HeapRegionRemSet::cleanup() {
 907   SparsePRT::cleanup_all();
 908 }
 909 
 // Empty this remembered set and make it ready for a fresh round of
 // parallel iteration.
 910 void HeapRegionRemSet::clear() {






 911   _other_regions.clear();
   // After clearing, no cards may remain represented anywhere.
 912   assert(occupied() == 0, "Should be clear.");
 913   reset_for_par_iteration();
 914 }
 915 
 // Reset the parallel-iteration bookkeeping so claim_iter() can hand
 // out this remembered set again.
 916 void HeapRegionRemSet::reset_for_par_iteration() {
 917   _iter_state = Unclaimed;
 918   _iter_claimed = 0;
 919   // It's good to check this to make sure that the two methods are in sync.
 920   assert(verify_ready_for_par_iteration(), "post-condition");
 921 }
 922 
 // Scrub stale entries from this remembered set; the work is done by
 // the underlying OtherRegionsTable using the supplied live bitmaps.
 923 void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
 924                              BitMap* region_bm, BitMap* card_bm) {
 925   _other_regions.scrub(ctbs, region_bm, card_bm);
 926 }
 927 
















































































































 928 //-------------------- Iteration --------------------
 929 
 // Construct an iterator over hrrs. Iteration starts in the Sparse
 // state; the coarse/fine cursors are primed one step "before" their
 // first element so the first increment lands on the first region.
 930 HeapRegionRemSetIterator:: HeapRegionRemSetIterator(const HeapRegionRemSet* hrrs) :
 931   _hrrs(hrrs),
 932   _g1h(G1CollectedHeap::heap()),
 933   _coarse_map(&hrrs->_other_regions._coarse_map),
 934   _fine_grain_regions(hrrs->_other_regions._fine_grain_regions),
 935   _bosa(hrrs->bosa()),
 936   _is(Sparse),
 937   // Set these values so that we increment to the first region.
 938   _coarse_cur_region_index(-1),
 939   _coarse_cur_region_cur_card(HeapRegion::CardsPerRegion-1),
 940   _cur_region_cur_card(0),
 941   _fine_array_index(-1),
 942   _fine_cur_prt(NULL),
 943   _n_yielded_coarse(0),
 944   _n_yielded_fine(0),
 945   _n_yielded_sparse(0),
 946   _sparse_iter(&hrrs->_other_regions._sparse_table) {}
 947 




  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/g1/concurrentG1Refine.hpp"
  27 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
  28 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  29 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  30 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
  31 #include "memory/allocation.hpp"
  32 #include "memory/space.inline.hpp"
  33 #include "oops/oop.inline.hpp"
  34 #include "utilities/bitMap.inline.hpp"
  35 #include "utilities/globalDefinitions.hpp"
  36 #include "utilities/growableArray.hpp"
  37 
  38 class PerRegionTable: public CHeapObj<mtGC> {
  39   friend class OtherRegionsTable;
  40   friend class HeapRegionRemSetIterator;
  41 
  42   HeapRegion*     _hr;
  43   BitMap          _bm;
  44   jint            _occupied;
  45 
  46   // next pointer for free/allocated 'all' list
  47   PerRegionTable* _next;
  48 
  49   // prev pointer for the allocated 'all' list
  50   PerRegionTable* _prev;
  51 
  52   // next pointer in collision list
  53   PerRegionTable * _collision_list_next;
  54 
  55   // Global free list of PRTs
  56   static PerRegionTable* _free_list;


 833     return _sparse_table.contains_card(hr_ind, card_index);
 834   }
 835 
 836 
 837 }
 838 
 // Cleanup support: only the sparse table participates, so the task is
 // forwarded to it directly.
 839 void
 840 OtherRegionsTable::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
 841   _sparse_table.do_cleanup_work(hrrs_cleanup_task);
 842 }
 843 
 844 // Determines how many threads can add records to an rset in parallel.
 845 // This can be done by either mutator threads together with the
 846 // concurrent refinement threads or GC threads.
 847 int HeapRegionRemSet::num_par_rem_sets() {
   // Max of the two writer populations: (dirty-card queue ids +
   // concurrent refinement threads) vs. ParallelGCThreads.
 848   return (int)MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), ParallelGCThreads);
 849 }
 850 
 // Construct an empty remembered set for region hr. The strong code
 // roots list starts out NULL and is allocated lazily in clear().
 851 HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
 852                                    HeapRegion* hr)
 853   : _bosa(bosa), _strong_code_roots_list(NULL), _other_regions(hr) {
 854   reset_for_par_iteration();
 855 }
 856 
 857 void HeapRegionRemSet::setup_remset_size() {
 858   // Setup sparse and fine-grain tables sizes.
 859   // table_size = base * (log(region_size / 1M) + 1)
 860   const int LOG_M = 20;
 861   int region_size_log_mb = MAX2(HeapRegion::LogOfHRGrainBytes - LOG_M, 0);
 862   if (FLAG_IS_DEFAULT(G1RSetSparseRegionEntries)) {
 863     G1RSetSparseRegionEntries = G1RSetSparseRegionEntriesBase * (region_size_log_mb + 1);
 864   }
 865   if (FLAG_IS_DEFAULT(G1RSetRegionEntries)) {
 866     G1RSetRegionEntries = G1RSetRegionEntriesBase * (region_size_log_mb + 1);
 867   }
 868   guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0 , "Sanity");
 869 }
 870 
 871 bool HeapRegionRemSet::claim_iter() {
 872   if (_iter_state != Unclaimed) return false;
 873   jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_state), Unclaimed);


 892     gclog_or_tty->print_cr("  Card " PTR_FORMAT, card_start);
 893   }
 894   if (iter.n_yielded() != occupied()) {
 895     gclog_or_tty->print_cr("Yielded disagrees with occupied:");
 896     gclog_or_tty->print_cr("  %6d yielded (%6d coarse, %6d fine).",
 897                   iter.n_yielded(),
 898                   iter.n_yielded_coarse(), iter.n_yielded_fine());
 899     gclog_or_tty->print_cr("  %6d occ     (%6d coarse, %6d fine).",
 900                   occupied(), occ_coarse(), occ_fine());
 901   }
 902   guarantee(iter.n_yielded() == occupied(),
 903             "We should have yielded all the represented cards.");
 904 }
 905 #endif
 906 
 // Static cleanup entry point: delegates to SparsePRT::cleanup_all()
 // (presumably reclaims retired sparse-table storage; see SparsePRT).
 907 void HeapRegionRemSet::cleanup() {
 908   SparsePRT::cleanup_all();
 909 }
 910 
 911 void HeapRegionRemSet::clear() {
 912   if (_strong_code_roots_list != NULL) {
 913     delete _strong_code_roots_list;
 914   }
 915   _strong_code_roots_list = new (ResourceObj::C_HEAP, mtGC)
 916                                 GrowableArray<nmethod*>(10, 0, NULL, true);
 917 
 918   _other_regions.clear();
 919   assert(occupied() == 0, "Should be clear.");
 920   reset_for_par_iteration();
 921 }
 922 
 // Reset parallel-iteration bookkeeping so claim_iter() can hand out
 // this remembered set again.
 923 void HeapRegionRemSet::reset_for_par_iteration() {
 924   _iter_state = Unclaimed;
 925   _iter_claimed = 0;
 926   // It's good to check this to make sure that the two methods are in sync.
 927   assert(verify_ready_for_par_iteration(), "post-condition");
 928 }
 929 
 // Scrub stale entries: delegate to the underlying OtherRegionsTable,
 // which consults the supplied region and card live bitmaps.
 930 void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
 931                              BitMap* region_bm, BitMap* card_bm) {
 932   _other_regions.scrub(ctbs, region_bm, card_bm);
 933 }
 934 
 935 
 936 // Code roots support
 937 
 938 void HeapRegionRemSet::add_strong_code_root(nmethod* nm) {
 939   assert(nm != NULL, "sanity");
 940   // Search for the code blob from the RHS to avoid
 941   // duplicate entries as much as possible
 942   if (_strong_code_roots_list->find_from_end(nm) < 0) {
 943     // Code blob isn't already in the list
 944     _strong_code_roots_list->push(nm);
 945   }
 946 }
 947 
 948 void HeapRegionRemSet::remove_strong_code_root(nmethod* nm) {
 949   assert(nm != NULL, "sanity");
 950   int idx = _strong_code_roots_list->find(nm);
 951   if (idx >= 0) {
 952     _strong_code_roots_list->remove_at(idx);
 953   }
 954   // Check that there were no duplicates
 955   guarantee(_strong_code_roots_list->find(nm) < 0, "duplicate entry found");
 956 }
 957 
 958 class NMethodMigrationOopClosure : public OopClosure {
 959   G1CollectedHeap* _g1h;
 960   HeapRegion* _from;
 961   nmethod* _nm;
 962 
 963   uint _num_self_forwarded;
 964 
 965   template <class T> void do_oop_work(T* p) {
 966     T heap_oop = oopDesc::load_heap_oop(p);
 967     if (!oopDesc::is_null(heap_oop)) {
 968       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
 969       if (_from->is_in(obj)) {
 970         // Reference still points into the source region.
 971         // Since roots are immediately evacuated this means that
 972         // we must have self forwarded the object
 973         assert(obj->is_forwarded(),
 974                err_msg("code roots should be immediately evacuated. "
 975                        "Ref: "PTR_FORMAT", "
 976                        "Obj: "PTR_FORMAT", "
 977                        "Region: "HR_FORMAT,
 978                        p, (void*) obj, HR_FORMAT_PARAMS(_from)));
 979         assert(obj->forwardee() == obj,
 980                err_msg("not self forwarded? obj = "PTR_FORMAT, (void*)obj));
 981 
 982         // The object has been self forwarded.
 983         // Note, if we're during an initial mark pause, there is
 984         // no need to explicitly mark object. It will be marked
 985         // during the regular evacuation failure handling code.
 986         _num_self_forwarded++;
 987       } else {
 988         // The reference points into a promotion or to-space region
 989         HeapRegion* to = _g1h->heap_region_containing(obj);
 990         to->rem_set()->add_strong_code_root(_nm);
 991       }
 992     }
 993   }
 994 
 995 public:
 996   NMethodMigrationOopClosure(G1CollectedHeap* g1h, HeapRegion* from, nmethod* nm):
 997     _g1h(g1h), _from(from), _nm(nm), _num_self_forwarded(0) {}
 998 
 999   void do_oop(narrowOop* p) { do_oop_work(p); }
1000   void do_oop(oop* p)       { do_oop_work(p); }
1001 
1002   uint retain() { return _num_self_forwarded > 0; }
1003 };
1004 
// Migrate this collection-set region's strong code roots to the regions
// that the nmethods' oops point to after evacuation. nmethods whose oops
// still point into this region (self-forwarded objects, i.e. the region
// suffered an evacuation failure) are re-registered on this region.
1005 void HeapRegionRemSet::migrate_strong_code_roots() {
1006   assert(hr()->in_collection_set(), "only collection set regions");
1007   assert(!hr()->isHumongous(), "not humongous regions");
1008 
1009   ResourceMark rm;
1010 
1011   // List of code blobs to retain for this region
1012   GrowableArray<nmethod*> to_be_retained(10);
1013   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1014 
     // Drain the current list: the migration closure registers nm with
     // destination regions and counts self-forwarded references.
1015   while (_strong_code_roots_list->is_nonempty()) {
1016     nmethod *nm = _strong_code_roots_list->pop();
1017     if (nm != NULL) {
1018       NMethodMigrationOopClosure oop_cl(g1h, hr(), nm);
1019       nm->oops_do(&oop_cl);
1020       if (oop_cl.retain()) {
1021         to_be_retained.push(nm);
1022       }
1023     }
1024   }
1025 
1026   // Now push any code roots we need to retain
1027   // FIXME: assert that region got an evacuation failure if non-empty
1028   while (to_be_retained.is_nonempty()) {
1029     nmethod* nm = to_be_retained.pop();
1030     assert(nm != NULL, "sanity");
1031     add_strong_code_root(nm);
1032   }
1033 }
1034 
1035 void HeapRegionRemSet::strong_code_roots_do(CodeBlobClosure* blk) const {
1036   for (int i = 0; i < _strong_code_roots_list->length(); i += 1) {
1037     nmethod* nm = _strong_code_roots_list->at(i);
1038     blk->do_code_blob(nm);
1039   }
1040 }
1041 
// Memory footprint of the strong code roots list: the GrowableArray
// header plus the allocated element storage (max_length(), i.e. the
// array's capacity rather than its in-use length).
1042 size_t HeapRegionRemSet::strong_code_roots_mem_size() {
1043   return sizeof(GrowableArray<nmethod*>) +
1044          _strong_code_roots_list->max_length() * sizeof(nmethod*);
1045 }
1046 
1047 //-------------------- Iteration --------------------
1048 
// Construct an iterator over hrrs. Iteration begins in the Sparse
// state; the coarse and fine cursors are primed one step "before"
// their first element so the first increment lands on the first region.
1049 HeapRegionRemSetIterator:: HeapRegionRemSetIterator(const HeapRegionRemSet* hrrs) :
1050   _hrrs(hrrs),
1051   _g1h(G1CollectedHeap::heap()),
1052   _coarse_map(&hrrs->_other_regions._coarse_map),
1053   _fine_grain_regions(hrrs->_other_regions._fine_grain_regions),
1054   _bosa(hrrs->bosa()),
1055   _is(Sparse),
1056   // Set these values so that we increment to the first region.
1057   _coarse_cur_region_index(-1),
1058   _coarse_cur_region_cur_card(HeapRegion::CardsPerRegion-1),
1059   _cur_region_cur_card(0),
1060   _fine_array_index(-1),
1061   _fine_cur_prt(NULL),
1062   _n_yielded_coarse(0),
1063   _n_yielded_fine(0),
1064   _n_yielded_sparse(0),
1065   _sparse_iter(&hrrs->_other_regions._sparse_table) {}
1066