
src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp

rev 8155 : 8170409: CMS: Crash in CardTableModRefBSForCTRS::process_chunk_boundaries


 435                         size_t& lowest_non_clean_chunk_size) {
 436 
 437   int       i        = find_covering_region_containing(sp->bottom());
 438   MemRegion covered  = _covered[i];
 439   size_t    n_chunks = chunks_to_cover(covered);
 440 
 441   // Only the first thread to obtain the lock will resize the
 442   // LNC array for the covered region.  Any later expansion can't affect
 443   // the used_at_save_marks region.
 444   // (I observed a bug in which the first thread to execute this would
 445   // resize, and then it would cause "expand_and_allocate" that would
 446   // increase the number of chunks in the covered region.  Then a second
 447   // thread would come and execute this, see that the size didn't match,
 448   // and free and allocate again.  So the first thread would be using a
 449   // freed "_lowest_non_clean" array.)
 450 
 451   // Do a dirty read here. If we pass the conditional then take the rare
 452   // event lock and do the read again in case some other thread had already
 453   // succeeded and done the resize.
 454   int cur_collection = Universe::heap()->total_collections();
 455   if (_last_LNC_resizing_collection[i] != cur_collection) {



 456     MutexLocker x(ParGCRareEvent_lock);
 457     if (_last_LNC_resizing_collection[i] != cur_collection) {

 458       if (_lowest_non_clean[i] == NULL ||
 459           n_chunks != _lowest_non_clean_chunk_size[i]) {
 460 
 461         // Should we delete the old?
 462         if (_lowest_non_clean[i] != NULL) {
 463           assert(n_chunks != _lowest_non_clean_chunk_size[i],
 464                  "logical consequence");
 465           FREE_C_HEAP_ARRAY(CardPtr, _lowest_non_clean[i], mtGC);
 466           _lowest_non_clean[i] = NULL;
 467         }
 468         // Now allocate a new one if necessary.
 469         if (_lowest_non_clean[i] == NULL) {
 470           _lowest_non_clean[i]                  = NEW_C_HEAP_ARRAY(CardPtr, n_chunks, mtGC);
 471           _lowest_non_clean_chunk_size[i]       = n_chunks;
 472           _lowest_non_clean_base_chunk_index[i] = addr_to_chunk_index(covered.start());
 473           for (int j = 0; j < (int)n_chunks; j++)
 474             _lowest_non_clean[i][j] = NULL;
 475         }
 476       }
 477       _last_LNC_resizing_collection[i] = cur_collection;

 478     }
 479   }
 480   // In any case, now do the initialization.
 481   lowest_non_clean                  = _lowest_non_clean[i];
 482   lowest_non_clean_base_chunk_index = _lowest_non_clean_base_chunk_index[i];
 483   lowest_non_clean_chunk_size       = _lowest_non_clean_chunk_size[i];
 484 }
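
In the version above, the thread that wins ParGCRareEvent_lock stores the new _lowest_non_clean[i] array and then _last_LNC_resizing_collection[i] with plain stores, while the fast path does a plain dirty read of the flag and, when it already matches, reads the array without ever taking the lock. On a weakly ordered (non-TSO) CPU these accesses can be reordered, so a reader may observe the current collection count while the array pointer it then loads is still stale or already freed. A minimal standalone sketch of that hazard, using std::atomic with relaxed ordering to model the plain accesses (this is not HotSpot code; LNCSlot, resized_at and table are illustrative names only):

    #include <atomic>
    #include <cstddef>

    struct LNCSlot {
      std::atomic<int>    resized_at{-1};   // models _last_LNC_resizing_collection[i]
      std::atomic<void**> table{nullptr};   // models _lowest_non_clean[i]
    };

    // Resizing thread (holds the rare-event lock): allocate, then publish the
    // collection count.  With relaxed ordering these two stores may become
    // visible to another CPU in either order.
    void publish(LNCSlot& s, int cur_collection, size_t n_chunks) {
      void** t = new void*[n_chunks]();                               // chunks zeroed
      s.table.store(t, std::memory_order_relaxed);
      s.resized_at.store(cur_collection, std::memory_order_relaxed);  // may be seen first
    }

    // Fast-path reader (the "dirty read"): may observe the new collection count
    // and still load a stale or already-freed table pointer.
    void** fast_path(LNCSlot& s, int cur_collection) {
      if (s.resized_at.load(std::memory_order_relaxed) == cur_collection) {
        return s.table.load(std::memory_order_relaxed);  // possibly stale -> crash
      }
      return nullptr;  // would fall into the locked slow path instead
    }

The patched version below closes this window by pairing a release store of the flag with an acquire load on the fast path.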


 435                         size_t& lowest_non_clean_chunk_size) {
 436 
 437   int       i        = find_covering_region_containing(sp->bottom());
 438   MemRegion covered  = _covered[i];
 439   size_t    n_chunks = chunks_to_cover(covered);
 440 
 441   // Only the first thread to obtain the lock will resize the
 442   // LNC array for the covered region.  Any later expansion can't affect
 443   // the used_at_save_marks region.
 444   // (I observed a bug in which the first thread to execute this would
 445   // resize, and then it would cause "expand_and_allocate" that would
 446   // increase the number of chunks in the covered region.  Then a second
 447   // thread would come and execute this, see that the size didn't match,
 448   // and free and allocate again.  So the first thread would be using a
 449   // freed "_lowest_non_clean" array.)
 450 
 451   // Do a dirty read here. If we pass the conditional then take the rare
 452   // event lock and do the read again in case some other thread had already
 453   // succeeded and done the resize.
 454   int cur_collection = Universe::heap()->total_collections();
 455   // The update to _last_LNC_resizing_collection[i] must not become visible
 456   // before _lowest_non_clean and friends are visible. Therefore use
 457   // acquire/release to guarantee this on non-TSO architectures.
 458   if (OrderAccess::load_acquire(&_last_LNC_resizing_collection[i]) != cur_collection) {
 459     MutexLocker x(ParGCRareEvent_lock);
 460     // This load_acquire is here for clarity only. The MutexLocker already fences.
 461     if (OrderAccess::load_acquire(&_last_LNC_resizing_collection[i]) != cur_collection) {
 462       if (_lowest_non_clean[i] == NULL ||
 463           n_chunks != _lowest_non_clean_chunk_size[i]) {
 464 
 465         // Should we delete the old?
 466         if (_lowest_non_clean[i] != NULL) {
 467           assert(n_chunks != _lowest_non_clean_chunk_size[i],
 468                  "logical consequence");
 469           FREE_C_HEAP_ARRAY(CardPtr, _lowest_non_clean[i], mtGC);
 470           _lowest_non_clean[i] = NULL;
 471         }
 472         // Now allocate a new one if necessary.
 473         if (_lowest_non_clean[i] == NULL) {
 474           _lowest_non_clean[i]                  = NEW_C_HEAP_ARRAY(CardPtr, n_chunks, mtGC);
 475           _lowest_non_clean_chunk_size[i]       = n_chunks;
 476           _lowest_non_clean_base_chunk_index[i] = addr_to_chunk_index(covered.start());
 477           for (int j = 0; j < (int)n_chunks; j++)
 478             _lowest_non_clean[i][j] = NULL;
 479         }
 480       }
 481       // Make sure this becomes visible only after _lowest_non_clean* has been initialized.
 482       OrderAccess::release_store(&_last_LNC_resizing_collection[i], cur_collection);
 483     }
 484   }
 485   // In any case, now do the initialization.
 486   lowest_non_clean                  = _lowest_non_clean[i];
 487   lowest_non_clean_base_chunk_index = _lowest_non_clean_base_chunk_index[i];
 488   lowest_non_clean_chunk_size       = _lowest_non_clean_chunk_size[i];
 489 }
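
For reference, the same double-checked pattern expressed as a rough, self-contained C++ sketch: std::memory_order_acquire/release stands in for OrderAccess::load_acquire and OrderAccess::release_store, and std::mutex stands in for ParGCRareEvent_lock. The struct and field names are illustrative only and not part of the patch:

    #include <atomic>
    #include <cstddef>
    #include <mutex>

    struct LNCSlot {
      std::atomic<int> resized_at{-1};      // models _last_LNC_resizing_collection[i]
      void**           table    = nullptr;  // models _lowest_non_clean[i]
      size_t           n_chunks = 0;        // models _lowest_non_clean_chunk_size[i]
      std::mutex       rare_event_lock;     // stands in for ParGCRareEvent_lock
    };

    void** lowest_non_clean_for(LNCSlot& s, int cur_collection, size_t n_chunks) {
      // Dirty read with acquire: if the flag already matches, everything stored
      // before the matching release below is guaranteed to be visible here.
      if (s.resized_at.load(std::memory_order_acquire) != cur_collection) {
        std::lock_guard<std::mutex> x(s.rare_event_lock);
        // Re-check under the lock; the mutex alone already orders this load
        // with respect to an earlier winner's release store.
        if (s.resized_at.load(std::memory_order_acquire) != cur_collection) {
          if (s.table == nullptr || s.n_chunks != n_chunks) {
            delete[] s.table;                   // safe on nullptr
            s.table    = new void*[n_chunks](); // chunks zero-initialized
            s.n_chunks = n_chunks;
          }
          // Release: the table initialization above must be visible before the
          // flag update, mirroring OrderAccess::release_store in the patch.
          s.resized_at.store(cur_collection, std::memory_order_release);
        }
      }
      return s.table;
    }

As in the patch, the second acquire load under the lock is not needed for correctness, since the mutex already orders the re-check; it is kept only to make the pairing with the release store explicit.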