src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp

rev 6583 : 8047328: Change typedef CardIdx_t from int to uint16_t
Summary: Changed the typedef and made the necessary changes to code interacting with the SparsePRTEntry class.
Reviewed-by:
Contributed-by: andreas.sjoberg@oracle.com
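The visible effect of the unsigned index shows up in the range asserts below: the first set of excerpts is the file before the change, the second set after it. With CardIdx_t as a uint16_t the lower-bound check 0 <= from_card is always true (and commonly draws a compiler warning), so the new version keeps only the upper-bound comparison. A minimal standalone sketch of that point, using an illustrative CardsPerRegion value rather than the HotSpot definition:

  // Illustration only; CardsPerRegion here is a hypothetical example value.
  #include <cstdint>
  #include <cassert>
  #include <cstddef>

  typedef uint16_t CardIdx_t;            // was: typedef int CardIdx_t;
  const size_t CardsPerRegion = 2048;    // e.g. 1M region / 512-byte cards

  void range_check(CardIdx_t from_card) {
    // Signed index: both halves of the check carried information.
    //   assert(0 <= from_card && (size_t)from_card < CardsPerRegion, "...");
    // Unsigned index: 0 <= from_card is tautological, so only the upper
    // bound remains, matching the updated asserts in the new version below.
    assert((size_t)from_card < CardsPerRegion && "Must be in range.");
  }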


  92 
  93     if (G1TraceHeapRegionRememberedSet) {
  94       gclog_or_tty->print_cr("    PRT::Add_reference_work(" PTR_FORMAT "->" PTR_FORMAT").",
  95                              from,
  96                              UseCompressedOops
  97                              ? (void *)oopDesc::load_decode_heap_oop((narrowOop*)from)
  98                              : (void *)oopDesc::load_decode_heap_oop((oop*)from));
  99     }
 100 
 101     HeapRegion* loc_hr = hr();
 102     // If the test below fails, then this table was reused concurrently
 103     // with this operation.  This is OK, since the old table was coarsened,
 104     // and adding a bit to the new table is never incorrect.
 105     // If the table used to belong to a continues humongous region and is
 106     // now reused for the corresponding start humongous region, we need to
 107     // make sure that we detect this. Thus, we call is_in_reserved_raw()
 108     // instead of just is_in_reserved() here.
 109     if (loc_hr->is_in_reserved_raw(from)) {
 110       size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
 111       CardIdx_t from_card = (CardIdx_t)
 112           hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);
 113 
 114       assert(0 <= from_card && (size_t)from_card < HeapRegion::CardsPerRegion,
 115              "Must be in range.");
 116       add_card_work(from_card, par);
 117     }
 118   }
 119 
 120 public:
 121 
 122   HeapRegion* hr() const { return _hr; }
 123 
 124   jint occupied() const {
 125     // Overkill, but if we ever need it...
 126     // guarantee(_occupied == _bm.count_one_bits(), "Check");
 127     return _occupied;
 128   }
 129 
 130   void init(HeapRegion* hr, bool clear_links_to_all_list) {
 131     if (clear_links_to_all_list) {
 132       set_next(NULL);
 133       set_prev(NULL);
 134     }
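For orientation, the from_card computation at lines 110-112 converts a heap-word offset within the region into a card index. A worked sketch with assumed values (512-byte cards and 8-byte heap words, i.e. card_shift == 9 and LogHeapWordSize == 3; these constants are illustrative, not quoted from this file):

  // Assumed, illustrative constants.
  const int card_shift      = 9;   // 512-byte cards
  const int LogHeapWordSize = 3;   // 8-byte heap words

  // A reference 40000 heap words past the region's bottom():
  size_t hw_offset = 40000;
  // 1 << (card_shift - LogHeapWordSize) = 64 heap words per card,
  // so shifting by 6 divides the word offset by 64:
  size_t from_card = hw_offset >> (card_shift - LogHeapWordSize);  // 625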


 454     if (G1TraceHeapRegionRememberedSet) {
 455       gclog_or_tty->print_cr("  coarse map hit.");
 456     }
 457     assert(contains_reference(from), "We just added it!");
 458     return;
 459   }
 460 
 461   // Otherwise find a per-region table to add it to.
 462   size_t ind = from_hrs_ind & _mod_max_fine_entries_mask;
 463   PerRegionTable* prt = find_region_table(ind, from_hr);
 464   if (prt == NULL) {
 465     MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
 466     // Confirm that it's really not there...
 467     prt = find_region_table(ind, from_hr);
 468     if (prt == NULL) {
 469 
 470       uintptr_t from_hr_bot_card_index =
 471         uintptr_t(from_hr->bottom())
 472           >> CardTableModRefBS::card_shift;
 473       CardIdx_t card_index = from_card - from_hr_bot_card_index;
 474       assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
 475              "Must be in range.");
 476       if (G1HRRSUseSparseTable &&
 477           _sparse_table.add_card(from_hrs_ind, card_index)) {
 478         if (G1RecordHRRSOops) {
 479           HeapRegionRemSet::record(hr(), from);
 480           if (G1TraceHeapRegionRememberedSet) {
 481             gclog_or_tty->print("   Added card " PTR_FORMAT " to region "
 482                                 "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
 483                                 align_size_down(uintptr_t(from),
 484                                                 CardTableModRefBS::card_size),
 485                                 hr()->bottom(), from);
 486           }
 487         }
 488         if (G1TraceHeapRegionRememberedSet) {
 489           gclog_or_tty->print_cr("   added card to sparse table.");
 490         }
 491         assert(contains_reference_locked(from), "We just added it!");
 492         return;
 493       } else {
 494         if (G1TraceHeapRegionRememberedSet) {


 500 
 501       if (_n_fine_entries == _max_fine_entries) {
 502         prt = delete_region_table();
 503         // There is no need to clear the links to the 'all' list here:
 504         // prt will be reused immediately, i.e. remain in the 'all' list.
 505         prt->init(from_hr, false /* clear_links_to_all_list */);
 506       } else {
 507         prt = PerRegionTable::alloc(from_hr);
 508         link_to_all(prt);
 509       }
 510 
 511       PerRegionTable* first_prt = _fine_grain_regions[ind];
 512       prt->set_collision_list_next(first_prt);
 513       _fine_grain_regions[ind] = prt;
 514       _n_fine_entries++;
 515 
 516       if (G1HRRSUseSparseTable) {
 517         // Transfer from sparse to fine-grain.
 518         SparsePRTEntry *sprt_entry = _sparse_table.get_entry(from_hrs_ind);
 519         assert(sprt_entry != NULL, "There should have been an entry");
 520         for (int i = 0; i < SparsePRTEntry::cards_num(); i++) {
 521           CardIdx_t c = sprt_entry->card(i);
 522           if (c != SparsePRTEntry::NullEntry) {
 523             prt->add_card(c);
 524           }
 525         }
 526         // Now we can delete the sparse entry.
 527         bool res = _sparse_table.delete_entry(from_hrs_ind);
 528         assert(res, "It should have been there.");
 529       }
 530     }
 531     assert(prt != NULL && prt->hr() == from_hr, "consequence");
 532   }
 533   // Note that we can't assert "prt->hr() == from_hr", because of the
 534   // possibility of concurrent reuse.  But see head comment of
 535   // OtherRegionsTable for why this is OK.
 536   assert(prt != NULL, "Inv");
 537 
 538   prt->add_reference(from);
 539 
 540   if (G1RecordHRRSOops) {
 541     HeapRegionRemSet::record(hr(), from);
 542     if (G1TraceHeapRegionRememberedSet) {
 543       gclog_or_tty->print("Added card " PTR_FORMAT " to region "
 544                           "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
 545                           align_size_down(uintptr_t(from),


 798 }
 799 
 800 bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
 801   HeapRegion* hr = _g1h->heap_region_containing_raw(from);
 802   RegionIdx_t hr_ind = (RegionIdx_t) hr->hrs_index();
 803   // Is this region in the coarse map?
 804   if (_coarse_map.at(hr_ind)) return true;
 805 
 806   PerRegionTable* prt = find_region_table(hr_ind & _mod_max_fine_entries_mask,
 807                                      hr);
 808   if (prt != NULL) {
 809     return prt->contains_reference(from);
 810 
 811   } else {
 812     uintptr_t from_card =
 813       (uintptr_t(from) >> CardTableModRefBS::card_shift);
 814     uintptr_t hr_bot_card_index =
 815       uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;
 816     assert(from_card >= hr_bot_card_index, "Inv");
 817     CardIdx_t card_index = from_card - hr_bot_card_index;
 818     assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
 819            "Must be in range.");
 820     return _sparse_table.contains_card(hr_ind, card_index);
 821   }
 822 }
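contains_reference_locked mirrors the structure of the add path: coarse map first, then the fine-grained per-region table, then the sparse table keyed by region and card index. A condensed sketch of that lookup order (the helpers region_index, card_index and find_fine_table are hypothetical stand-ins for the inline code above, not functions in this file):

  // Sketch only; helpers are hypothetical stand-ins for the inline code above.
  bool contains(OopOrNarrowOopStar from) const {
    if (_coarse_map.at(region_index(from))) {
      return true;                              // whole region is recorded
    }
    if (PerRegionTable* prt = find_fine_table(from)) {
      return prt->contains_reference(from);     // per-card bitmap for the region
    }
    return _sparse_table.contains_card(region_index(from), card_index(from));
  }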
 823 
 824 void
 825 OtherRegionsTable::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
 826   _sparse_table.do_cleanup_work(hrrs_cleanup_task);
 827 }
 828 
 829 // Determines how many threads can add records to an rset in parallel.
 830 // This can be done by either mutator threads together with the
 831 // concurrent refinement threads or GC threads.
 832 uint HeapRegionRemSet::num_par_rem_sets() {
 833   return MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), (uint)ParallelGCThreads);
 834 }
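As a worked example of the bound computed here: assuming, purely for illustration, 21 dirty-card-queue parallel ids, 8 concurrent refinement threads and ParallelGCThreads == 18, num_par_rem_sets() would return MAX2(21 + 8, 18) == 29, i.e. the larger of the two possible writer populations.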
 835 
 836 HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
 837                                    HeapRegion* hr)
 838   : _bosa(bosa),




  92 
  93     if (G1TraceHeapRegionRememberedSet) {
  94       gclog_or_tty->print_cr("    PRT::Add_reference_work(" PTR_FORMAT "->" PTR_FORMAT").",
  95                              from,
  96                              UseCompressedOops
  97                              ? (void *)oopDesc::load_decode_heap_oop((narrowOop*)from)
  98                              : (void *)oopDesc::load_decode_heap_oop((oop*)from));
  99     }
 100 
 101     HeapRegion* loc_hr = hr();
 102     // If the test below fails, then this table was reused concurrently
 103     // with this operation.  This is OK, since the old table was coarsened,
 104     // and adding a bit to the new table is never incorrect.
 105     // If the table used to belong to a continues humongous region and is
 106     // now reused for the corresponding start humongous region, we need to
 107     // make sure that we detect this. Thus, we call is_in_reserved_raw()
 108     // instead of just is_in_reserved() here.
 109     if (loc_hr->is_in_reserved_raw(from)) {
 110       size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
 111       CardIdx_t from_card = (CardIdx_t)
 112           (hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize));
 113 
 114       assert((size_t)from_card < HeapRegion::CardsPerRegion,
 115              "Must be in range.");
 116       add_card_work(from_card, par);
 117     }
 118   }
 119 
 120 public:
 121 
 122   HeapRegion* hr() const { return _hr; }
 123 
 124   jint occupied() const {
 125     // Overkill, but if we ever need it...
 126     // guarantee(_occupied == _bm.count_one_bits(), "Check");
 127     return _occupied;
 128   }
 129 
 130   void init(HeapRegion* hr, bool clear_links_to_all_list) {
 131     if (clear_links_to_all_list) {
 132       set_next(NULL);
 133       set_prev(NULL);
 134     }


 454     if (G1TraceHeapRegionRememberedSet) {
 455       gclog_or_tty->print_cr("  coarse map hit.");
 456     }
 457     assert(contains_reference(from), "We just added it!");
 458     return;
 459   }
 460 
 461   // Otherwise find a per-region table to add it to.
 462   size_t ind = from_hrs_ind & _mod_max_fine_entries_mask;
 463   PerRegionTable* prt = find_region_table(ind, from_hr);
 464   if (prt == NULL) {
 465     MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
 466     // Confirm that it's really not there...
 467     prt = find_region_table(ind, from_hr);
 468     if (prt == NULL) {
 469 
 470       uintptr_t from_hr_bot_card_index =
 471         uintptr_t(from_hr->bottom())
 472           >> CardTableModRefBS::card_shift;
 473       CardIdx_t card_index = from_card - from_hr_bot_card_index;
 474       assert((size_t)card_index < HeapRegion::CardsPerRegion,
 475              "Must be in range.");
 476       if (G1HRRSUseSparseTable &&
 477           _sparse_table.add_card(from_hrs_ind, card_index)) {
 478         if (G1RecordHRRSOops) {
 479           HeapRegionRemSet::record(hr(), from);
 480           if (G1TraceHeapRegionRememberedSet) {
 481             gclog_or_tty->print("   Added card " PTR_FORMAT " to region "
 482                                 "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
 483                                 align_size_down(uintptr_t(from),
 484                                                 CardTableModRefBS::card_size),
 485                                 hr()->bottom(), from);
 486           }
 487         }
 488         if (G1TraceHeapRegionRememberedSet) {
 489           gclog_or_tty->print_cr("   added card to sparse table.");
 490         }
 491         assert(contains_reference_locked(from), "We just added it!");
 492         return;
 493       } else {
 494         if (G1TraceHeapRegionRememberedSet) {


 500 
 501       if (_n_fine_entries == _max_fine_entries) {
 502         prt = delete_region_table();
 503         // There is no need to clear the links to the 'all' list here:
 504         // prt will be reused immediately, i.e. remain in the 'all' list.
 505         prt->init(from_hr, false /* clear_links_to_all_list */);
 506       } else {
 507         prt = PerRegionTable::alloc(from_hr);
 508         link_to_all(prt);
 509       }
 510 
 511       PerRegionTable* first_prt = _fine_grain_regions[ind];
 512       prt->set_collision_list_next(first_prt);
 513       _fine_grain_regions[ind] = prt;
 514       _n_fine_entries++;
 515 
 516       if (G1HRRSUseSparseTable) {
 517         // Transfer from sparse to fine-grain.
 518         SparsePRTEntry *sprt_entry = _sparse_table.get_entry(from_hrs_ind);
 519         assert(sprt_entry != NULL, "There should have been an entry");
 520         for (int i = 0; i < sprt_entry->num_valid_cards(); i++) {
 521           CardIdx_t c = sprt_entry->card(i);

 522           prt->add_card(c);
 523         }

 524         // Now we can delete the sparse entry.
 525         bool res = _sparse_table.delete_entry(from_hrs_ind);
 526         assert(res, "It should have been there.");
 527       }
 528     }
 529     assert(prt != NULL && prt->hr() == from_hr, "consequence");
 530   }
 531   // Note that we can't assert "prt->hr() == from_hr", because of the
 532   // possibility of concurrent reuse.  But see head comment of
 533   // OtherRegionsTable for why this is OK.
 534   assert(prt != NULL, "Inv");
 535 
 536   prt->add_reference(from);
 537 
 538   if (G1RecordHRRSOops) {
 539     HeapRegionRemSet::record(hr(), from);
 540     if (G1TraceHeapRegionRememberedSet) {
 541       gclog_or_tty->print("Added card " PTR_FORMAT " to region "
 542                           "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
 543                           align_size_down(uintptr_t(from),
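This transfer loop is one of the SparsePRTEntry interactions the summary refers to: the old version scans the entry's full capacity and skips NullEntry slots, while the new version iterates only the valid cards, which drops the sentinel comparison entirely. Side by side (condensed from the two versions shown in this webrev):

  // Before: scan the full capacity, skipping the NullEntry sentinel.
  for (int i = 0; i < SparsePRTEntry::cards_num(); i++) {
    CardIdx_t c = sprt_entry->card(i);
    if (c != SparsePRTEntry::NullEntry) {
      prt->add_card(c);
    }
  }

  // After: visit only the populated slots; no sentinel check needed.
  for (int i = 0; i < sprt_entry->num_valid_cards(); i++) {
    prt->add_card(sprt_entry->card(i));
  }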


 796 }
 797 
 798 bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
 799   HeapRegion* hr = _g1h->heap_region_containing_raw(from);
 800   RegionIdx_t hr_ind = (RegionIdx_t) hr->hrs_index();
 801   // Is this region in the coarse map?
 802   if (_coarse_map.at(hr_ind)) return true;
 803 
 804   PerRegionTable* prt = find_region_table(hr_ind & _mod_max_fine_entries_mask,
 805                                      hr);
 806   if (prt != NULL) {
 807     return prt->contains_reference(from);
 808 
 809   } else {
 810     uintptr_t from_card =
 811       (uintptr_t(from) >> CardTableModRefBS::card_shift);
 812     uintptr_t hr_bot_card_index =
 813       uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;
 814     assert(from_card >= hr_bot_card_index, "Inv");
 815     CardIdx_t card_index = from_card - hr_bot_card_index;
 816     assert((size_t)card_index < HeapRegion::CardsPerRegion,
 817            "Must be in range.");
 818     return _sparse_table.contains_card(hr_ind, card_index);
 819   }
 820 }
 821 
 822 void
 823 OtherRegionsTable::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
 824   _sparse_table.do_cleanup_work(hrrs_cleanup_task);
 825 }
 826 
 827 // Determines how many threads can add records to an rset in parallel.
 828 // This can be done by either mutator threads together with the
 829 // concurrent refinement threads or GC threads.
 830 uint HeapRegionRemSet::num_par_rem_sets() {
 831   return MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), (uint)ParallelGCThreads);
 832 }
 833 
 834 HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
 835                                    HeapRegion* hr)
 836   : _bosa(bosa),