< prev index next >

src/share/vm/gc/g1/heapRegionRemSet.cpp

Print this page
rev 9982 : 8147087: Race when reusing PerRegionTable bitmaps may result in dropped remembered set entries
Summary: Do not make reused PRTs available to other threads before the bitmap of the PRT has been cleared.
Contributed-by: Poonam Bajaj <poonam.bajaj@oracle.com>, Thomas Schatzl <thomas.schatzl@oracle.com>
rev 9983 : [mq]: 8147087-comments
   1 /*
   2  * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


  93     // If the test below fails, then this table was reused concurrently
  94     // with this operation.  This is OK, since the old table was coarsened,
  95     // and adding a bit to the new table is never incorrect.
  96     // If the table used to belong to a continues humongous region and is
  97     // now reused for the corresponding starts humongous region, we need to
  98     // make sure that we detect this. The check below therefore uses
  99     // is_in_reserved() on the region cached in this table.
 100     if (loc_hr->is_in_reserved(from)) {
 101       size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
 102       CardIdx_t from_card = (CardIdx_t)
 103           hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);
 104 
 105       assert(0 <= from_card && (size_t)from_card < HeapRegion::CardsPerRegion,
 106              "Must be in range.");
 107       add_card_work(from_card, par);
 108     }
 109   }
 110 
 111 public:
 112 
 113   HeapRegion* hr() const { return _hr; }


 114 
 115   jint occupied() const {
 116     // Overkill, but if we ever need it...
 117     // guarantee(_occupied == _bm.count_one_bits(), "Check");
 118     return _occupied;
 119   }
 120 
 121   void init(HeapRegion* hr, bool clear_links_to_all_list) {
 122     if (clear_links_to_all_list) {
 123       set_next(NULL);
 124       set_prev(NULL);
 125     }
 126     _hr = hr;
 127     _collision_list_next = NULL;
 128     _occupied = 0;
 129     _bm.clear();



 130   }
 131 
 132   void add_reference(OopOrNarrowOopStar from) {
 133     add_reference_work(from, /*parallel*/ true);
 134   }
 135 
 136   void seq_add_reference(OopOrNarrowOopStar from) {
 137     add_reference_work(from, /*parallel*/ false);
 138   }
 139 
 140   void scrub(CardTableModRefBS* ctbs, BitMap* card_bm) {
 141     HeapWord* hr_bot = hr()->bottom();
 142     size_t hr_first_card_index = ctbs->index_for(hr_bot);
 143     bm()->set_intersection_at_offset(*card_bm, hr_first_card_index);
 144     recount_occupied();
 145   }
 146 
 147   void add_card(CardIdx_t from_card_index) {
 148     add_card_work(from_card_index, /*parallel*/ true);
 149   }


 340   }
 341 
 342   prt->set_next(NULL);
 343   prt->set_prev(NULL);
 344 
 345   assert((_first_all_fine_prts == NULL && _last_all_fine_prts == NULL) ||
 346          (_first_all_fine_prts != NULL && _last_all_fine_prts != NULL),
 347          "just checking");
 348   assert(_last_all_fine_prts == NULL || _last_all_fine_prts->next() == NULL,
 349          "just checking");
 350   assert(_first_all_fine_prts == NULL || _first_all_fine_prts->prev() == NULL,
 351          "just checking");
 352 }
 353 
 354 void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
 355   uint cur_hrm_ind = _hr->hrm_index();
 356 
 357   int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);
 358 
 359   if (G1FromCardCache::contains_or_replace(tid, cur_hrm_ind, from_card)) {
 360     assert(contains_reference(from), "We just added it!");
 361     return;
 362   }
 363 
 364   // Note that this may be a continued H region.
 365   HeapRegion* from_hr = _g1h->heap_region_containing(from);
 366   RegionIdx_t from_hrm_ind = (RegionIdx_t) from_hr->hrm_index();
 367 
 368   // If the region is already coarsened, return.
 369   if (_coarse_map.at(from_hrm_ind)) {
 370     assert(contains_reference(from), "We just added it!");
 371     return;
 372   }
 373 
 374   // Otherwise find a per-region table to add it to.
 375   size_t ind = from_hrm_ind & _mod_max_fine_entries_mask;
 376   PerRegionTable* prt = find_region_table(ind, from_hr);
 377   if (prt == NULL) {
 378     MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
 379     // Confirm that it's really not there...
 380     prt = find_region_table(ind, from_hr);
 381     if (prt == NULL) {
 382 
 383       uintptr_t from_hr_bot_card_index =
 384         uintptr_t(from_hr->bottom())
 385           >> CardTableModRefBS::card_shift;
 386       CardIdx_t card_index = from_card - from_hr_bot_card_index;
 387       assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
 388              "Must be in range.");
 389       if (G1HRRSUseSparseTable &&
 390           _sparse_table.add_card(from_hrm_ind, card_index)) {
 391         assert(contains_reference_locked(from), "We just added it!");
 392         return;
 393       }
 394 
 395       if (_n_fine_entries == _max_fine_entries) {
 396         prt = delete_region_table();
 397         // There is no need to clear the links to the 'all' list here:
 398         // prt will be reused immediately, i.e. remain in the 'all' list.
 399         prt->init(from_hr, false /* clear_links_to_all_list */);
 400       } else {
 401         prt = PerRegionTable::alloc(from_hr);
 402         link_to_all(prt);
 403       }
 404 
 405       PerRegionTable* first_prt = _fine_grain_regions[ind];
 406       prt->set_collision_list_next(first_prt);
 407       // The assignment into _fine_grain_regions allows the prt to
 408       // start being used concurrently. In addition to
 409       // collision_list_next which must be visible (else concurrent
 410       // parsing of the list, if any, may fail to see other entries),
 411       // the content of the prt must be visible (else for instance


 421         assert(sprt_entry != NULL, "There should have been an entry");
 422         for (int i = 0; i < SparsePRTEntry::cards_num(); i++) {
 423           CardIdx_t c = sprt_entry->card(i);
 424           if (c != SparsePRTEntry::NullEntry) {
 425             prt->add_card(c);
 426           }
 427         }
 428         // Now we can delete the sparse entry.
 429         bool res = _sparse_table.delete_entry(from_hrm_ind);
 430         assert(res, "It should have been there.");
 431       }
 432     }
 433     assert(prt != NULL && prt->hr() == from_hr, "consequence");
 434   }
 435   // Note that we can't assert "prt->hr() == from_hr", because of the
 436   // possibility of concurrent reuse.  But see head comment of
 437   // OtherRegionsTable for why this is OK.
 438   assert(prt != NULL, "Inv");
 439 
 440   prt->add_reference(from);
 441   assert(contains_reference(from), "We just added it!");
 442 }
 443 
 444 PerRegionTable*
 445 OtherRegionsTable::find_region_table(size_t ind, HeapRegion* hr) const {
 446   assert(ind < _max_fine_entries, "Preconditions.");
 447   PerRegionTable* prt = _fine_grain_regions[ind];
 448   while (prt != NULL && prt->hr() != hr) {
 449     prt = prt->collision_list_next();
 450   }
 451   // Loop postcondition is the method postcondition.
 452   return prt;
 453 }
 454 
 455 jint OtherRegionsTable::_n_coarsenings = 0;
 456 
 457 PerRegionTable* OtherRegionsTable::delete_region_table() {
 458   assert(_m->owned_by_self(), "Precondition");
 459   assert(_n_fine_entries == _max_fine_entries, "Precondition");
 460   PerRegionTable* max = NULL;
 461   jint max_occ = 0;


   1 /*
   2  * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


  93     // If the test below fails, then this table was reused concurrently
  94     // with this operation.  This is OK, since the old table was coarsened,
  95     // and adding a bit to the new table is never incorrect.
  96     // If the table used to belong to a continues humongous region and is
  97     // now reused for the corresponding starts humongous region, we need to
  98     // make sure that we detect this. The check below therefore uses
  99     // is_in_reserved() on the region cached in this table.
 100     if (loc_hr->is_in_reserved(from)) {
 101       size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
 102       CardIdx_t from_card = (CardIdx_t)
 103           hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);
 104 
 105       assert(0 <= from_card && (size_t)from_card < HeapRegion::CardsPerRegion,
 106              "Must be in range.");
 107       add_card_work(from_card, par);
 108     }
 109   }
 110 
 111 public:
 112 
  // Returns the region this PRT currently describes.
  // The acquire load pairs with the release store in init(): a reader that
  // observes the published _hr is guaranteed to also observe the cleared
  // bitmap, so a reused PRT never exposes stale bits (JDK-8147087).
  HeapRegion* hr() const { 
    return (HeapRegion*) OrderAccess::load_ptr_acquire(&_hr);
  }
 116 
 117   jint occupied() const {
 118     // Overkill, but if we ever need it...
 119     // guarantee(_occupied == _bm.count_one_bits(), "Check");
 120     return _occupied;
 121   }
 122 
  // (Re)initialize this PRT for the given region.
  // clear_links_to_all_list: if true, also unhook this PRT from the 'all'
  // list; a PRT being reused in place (see delete_region_table()) stays
  // linked and passes false.
  void init(HeapRegion* hr, bool clear_links_to_all_list) {
    if (clear_links_to_all_list) {
      set_next(NULL);
      set_prev(NULL);
    }

    _collision_list_next = NULL;
    _occupied = 0;
    _bm.clear();
    // Make sure that the bitmap clearing above has been finished before publishing
    // this PRT to concurrent threads. Concurrent readers match a PRT by its
    // _hr (see hr() / find_region_table()), so _hr is stored last, with
    // release semantics pairing with the acquire load in hr().
    OrderAccess::release_store_ptr(&_hr, hr);
  }
 135 
 136   void add_reference(OopOrNarrowOopStar from) {
 137     add_reference_work(from, /*parallel*/ true);
 138   }
 139 
 140   void seq_add_reference(OopOrNarrowOopStar from) {
 141     add_reference_work(from, /*parallel*/ false);
 142   }
 143 
 144   void scrub(CardTableModRefBS* ctbs, BitMap* card_bm) {
 145     HeapWord* hr_bot = hr()->bottom();
 146     size_t hr_first_card_index = ctbs->index_for(hr_bot);
 147     bm()->set_intersection_at_offset(*card_bm, hr_first_card_index);
 148     recount_occupied();
 149   }
 150 
 151   void add_card(CardIdx_t from_card_index) {
 152     add_card_work(from_card_index, /*parallel*/ true);
 153   }


 344   }
 345 
 346   prt->set_next(NULL);
 347   prt->set_prev(NULL);
 348 
 349   assert((_first_all_fine_prts == NULL && _last_all_fine_prts == NULL) ||
 350          (_first_all_fine_prts != NULL && _last_all_fine_prts != NULL),
 351          "just checking");
 352   assert(_last_all_fine_prts == NULL || _last_all_fine_prts->next() == NULL,
 353          "just checking");
 354   assert(_first_all_fine_prts == NULL || _first_all_fine_prts->prev() == NULL,
 355          "just checking");
 356 }
 357 
 358 void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
 359   uint cur_hrm_ind = _hr->hrm_index();
 360 
 361   int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);
 362 
 363   if (G1FromCardCache::contains_or_replace(tid, cur_hrm_ind, from_card)) {
 364     assert(contains_reference(from), "We just found " PTR_FORMAT " in the FromCardCache", p2i(from));
 365     return;
 366   }
 367 
 368   // Note that this may be a continued H region.
 369   HeapRegion* from_hr = _g1h->heap_region_containing(from);
 370   RegionIdx_t from_hrm_ind = (RegionIdx_t) from_hr->hrm_index();
 371 
 372   // If the region is already coarsened, return.
 373   if (_coarse_map.at(from_hrm_ind)) {
 374     assert(contains_reference(from), "We just found " PTR_FORMAT " in the Coarse table", p2i(from));
 375     return;
 376   }
 377 
 378   // Otherwise find a per-region table to add it to.
 379   size_t ind = from_hrm_ind & _mod_max_fine_entries_mask;
 380   PerRegionTable* prt = find_region_table(ind, from_hr);
 381   if (prt == NULL) {
 382     MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
 383     // Confirm that it's really not there...
 384     prt = find_region_table(ind, from_hr);
 385     if (prt == NULL) {
 386 
 387       uintptr_t from_hr_bot_card_index =
 388         uintptr_t(from_hr->bottom())
 389           >> CardTableModRefBS::card_shift;
 390       CardIdx_t card_index = from_card - from_hr_bot_card_index;
 391       assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
 392              "Must be in range.");
 393       if (G1HRRSUseSparseTable &&
 394           _sparse_table.add_card(from_hrm_ind, card_index)) {
 395         assert(contains_reference_locked(from), "We just added " PTR_FORMAT " to the Sparse table", p2i(from));
 396         return;
 397       }
 398 
 399       if (_n_fine_entries == _max_fine_entries) {
 400         prt = delete_region_table();
 401         // There is no need to clear the links to the 'all' list here:
 402         // prt will be reused immediately, i.e. remain in the 'all' list.
 403         prt->init(from_hr, false /* clear_links_to_all_list */);
 404       } else {
 405         prt = PerRegionTable::alloc(from_hr);
 406         link_to_all(prt);
 407       }
 408 
 409       PerRegionTable* first_prt = _fine_grain_regions[ind];
 410       prt->set_collision_list_next(first_prt);
 411       // The assignment into _fine_grain_regions allows the prt to
 412       // start being used concurrently. In addition to
 413       // collision_list_next which must be visible (else concurrent
 414       // parsing of the list, if any, may fail to see other entries),
 415       // the content of the prt must be visible (else for instance


 425         assert(sprt_entry != NULL, "There should have been an entry");
 426         for (int i = 0; i < SparsePRTEntry::cards_num(); i++) {
 427           CardIdx_t c = sprt_entry->card(i);
 428           if (c != SparsePRTEntry::NullEntry) {
 429             prt->add_card(c);
 430           }
 431         }
 432         // Now we can delete the sparse entry.
 433         bool res = _sparse_table.delete_entry(from_hrm_ind);
 434         assert(res, "It should have been there.");
 435       }
 436     }
 437     assert(prt != NULL && prt->hr() == from_hr, "consequence");
 438   }
 439   // Note that we can't assert "prt->hr() == from_hr", because of the
 440   // possibility of concurrent reuse.  But see head comment of
 441   // OtherRegionsTable for why this is OK.
 442   assert(prt != NULL, "Inv");
 443 
 444   prt->add_reference(from);
 445   assert(contains_reference(from), "We just added " PTR_FORMAT " to the PRT", p2i(from));
 446 }
 447 
 448 PerRegionTable*
 449 OtherRegionsTable::find_region_table(size_t ind, HeapRegion* hr) const {
 450   assert(ind < _max_fine_entries, "Preconditions.");
 451   PerRegionTable* prt = _fine_grain_regions[ind];
 452   while (prt != NULL && prt->hr() != hr) {
 453     prt = prt->collision_list_next();
 454   }
 455   // Loop postcondition is the method postcondition.
 456   return prt;
 457 }
 458 
 459 jint OtherRegionsTable::_n_coarsenings = 0;
 460 
 461 PerRegionTable* OtherRegionsTable::delete_region_table() {
 462   assert(_m->owned_by_self(), "Precondition");
 463   assert(_n_fine_entries == _max_fine_entries, "Precondition");
 464   PerRegionTable* max = NULL;
 465   jint max_occ = 0;


< prev index next >