
src/share/vm/gc/cms/parCardTableModRefBS.cpp

rev 12341 : 8170409: CMS: Crash in CardTableModRefBSForCTRS::process_chunk_boundaries
Reviewed-by: simonis
Contributed-by: gunter.haug@sap.com


 378                         size_t& lowest_non_clean_chunk_size) {
 379 
 380   int       i        = find_covering_region_containing(sp->bottom());
 381   MemRegion covered  = _covered[i];
 382   size_t    n_chunks = chunks_to_cover(covered);
 383 
 384   // Only the first thread to obtain the lock will resize the
 385   // LNC array for the covered region.  Any later expansion can't affect
 386   // the used_at_save_marks region.
 387   // (I observed a bug in which the first thread to execute this would
 388   // resize, and then it would cause "expand_and_allocate" that would
 389   // increase the number of chunks in the covered region.  Then a second
 390   // thread would come and execute this, see that the size didn't match,
 391   // and free and allocate again.  So the first thread would be using a
 392   // freed "_lowest_non_clean" array.)
 393 
 394   // Do a dirty read here. If we pass the conditional then take the rare
 395   // event lock and do the read again in case some other thread had already
 396   // succeeded and done the resize.
 397   int cur_collection = GenCollectedHeap::heap()->total_collections();
 398   if (_last_LNC_resizing_collection[i] != cur_collection) {
 399     MutexLocker x(ParGCRareEvent_lock);
 400     if (_last_LNC_resizing_collection[i] != cur_collection) {
 401       if (_lowest_non_clean[i] == NULL ||
 402           n_chunks != _lowest_non_clean_chunk_size[i]) {
 403 
 404         // Should we delete the old?
 405         if (_lowest_non_clean[i] != NULL) {
 406           assert(n_chunks != _lowest_non_clean_chunk_size[i],
 407                  "logical consequence");
 408           FREE_C_HEAP_ARRAY(CardPtr, _lowest_non_clean[i]);
 409           _lowest_non_clean[i] = NULL;
 410         }
 411         // Now allocate a new one if necessary.
 412         if (_lowest_non_clean[i] == NULL) {
 413           _lowest_non_clean[i]                  = NEW_C_HEAP_ARRAY(CardPtr, n_chunks, mtGC);
 414           _lowest_non_clean_chunk_size[i]       = n_chunks;
 415           _lowest_non_clean_base_chunk_index[i] = addr_to_chunk_index(covered.start());
 416           for (int j = 0; j < (int)n_chunks; j++)
 417             _lowest_non_clean[i][j] = NULL;
 418         }
 419       }
 420       _last_LNC_resizing_collection[i] = cur_collection;
 421     }
 422   }
 423   // In any case, now do the initialization.
 424   lowest_non_clean                  = _lowest_non_clean[i];
 425   lowest_non_clean_base_chunk_index = _lowest_non_clean_base_chunk_index[i];
 426   lowest_non_clean_chunk_size       = _lowest_non_clean_chunk_size[i];
 427 }
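
For context, the comment added in the fixed version below describes the hazard in the plain double-checked read above: on weakly ordered hardware, the store to _last_LNC_resizing_collection[i] can become visible to another thread before the stores that set up _lowest_non_clean[i], so a reader that skips the lock may use a stale or uninitialized array. A stand-alone sketch of that publication race (illustrative names and types, not the HotSpot ones):

    // Reduction of the race: without memory ordering, the store to 'generation'
    // may become visible to another thread before the store to 'array', so a
    // reader that passes the generation check can still load a stale pointer.
    #include <cstddef>

    struct Slot {
      int* array;       // plays the role of _lowest_non_clean[i]
      int  generation;  // plays the role of _last_LNC_resizing_collection[i]
    };

    void publish(Slot& s, int cur_collection, std::size_t n_chunks) {
      s.array      = new int[n_chunks]();  // (1) allocate and initialize the payload
      s.generation = cur_collection;       // (2) may become visible before (1)
    }

    int* consume(Slot& s, int cur_collection) {
      if (s.generation == cur_collection) {  // observes (2) ...
        return s.array;                      // ... but possibly not (1)
      }
      return nullptr;
    }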


 378                         size_t& lowest_non_clean_chunk_size) {
 379 
 380   int       i        = find_covering_region_containing(sp->bottom());
 381   MemRegion covered  = _covered[i];
 382   size_t    n_chunks = chunks_to_cover(covered);
 383 
 384   // Only the first thread to obtain the lock will resize the
 385   // LNC array for the covered region.  Any later expansion can't affect
 386   // the used_at_save_marks region.
 387   // (I observed a bug in which the first thread to execute this would
 388   // resize, and then it would cause "expand_and_allocate" that would
 389   // increase the number of chunks in the covered region.  Then a second
 390   // thread would come and execute this, see that the size didn't match,
 391   // and free and allocate again.  So the first thread would be using a
 392   // freed "_lowest_non_clean" array.)
 393 
 394   // Do a dirty read here. If we pass the conditional then take the rare
 395   // event lock and do the read again in case some other thread had already
 396   // succeeded and done the resize.
 397   int cur_collection = GenCollectedHeap::heap()->total_collections();
 398   // Updated _last_LNC_resizing_collection[i] must not be visible before
 399   // _lowest_non_clean and friends are visible. Therefore use acquire/release
 400   // to guarantee this on non-TSO architectures.
 401   if (OrderAccess::load_acquire(&_last_LNC_resizing_collection[i]) != cur_collection) {
 402     MutexLocker x(ParGCRareEvent_lock);
 403     if (OrderAccess::load_acquire(&_last_LNC_resizing_collection[i]) != cur_collection) {
 404       if (_lowest_non_clean[i] == NULL ||
 405           n_chunks != _lowest_non_clean_chunk_size[i]) {
 406 
 407         // Should we delete the old?
 408         if (_lowest_non_clean[i] != NULL) {
 409           assert(n_chunks != _lowest_non_clean_chunk_size[i],
 410                  "logical consequence");
 411           FREE_C_HEAP_ARRAY(CardPtr, _lowest_non_clean[i]);
 412           _lowest_non_clean[i] = NULL;
 413         }
 414         // Now allocate a new one if necessary.
 415         if (_lowest_non_clean[i] == NULL) {
 416           _lowest_non_clean[i]                  = NEW_C_HEAP_ARRAY(CardPtr, n_chunks, mtGC);
 417           _lowest_non_clean_chunk_size[i]       = n_chunks;
 418           _lowest_non_clean_base_chunk_index[i] = addr_to_chunk_index(covered.start());
 419           for (int j = 0; j < (int)n_chunks; j++)
 420             _lowest_non_clean[i][j] = NULL;
 421         }
 422       }
 423       // Make sure this becomes visible only after _lowest_non_clean* has been initialized.
 424       OrderAccess::release_store(&_last_LNC_resizing_collection[i], cur_collection);
 425     }
 426   }
 427   // In any case, now do the initialization.
 428   lowest_non_clean                  = _lowest_non_clean[i];
 429   lowest_non_clean_base_chunk_index = _lowest_non_clean_base_chunk_index[i];
 430   lowest_non_clean_chunk_size       = _lowest_non_clean_chunk_size[i];
 431 }
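
The fix pairs OrderAccess::load_acquire on the fast-path check with OrderAccess::release_store after the arrays are set up, i.e. the standard acquire/release publication idiom. A rough stand-alone equivalent using std::atomic (a sketch with illustrative names, not the HotSpot API):

    #include <atomic>
    #include <cstddef>

    struct Slot {
      int*             array;       // payload, written only under the rare-event lock
      std::atomic<int> generation;  // guard, read without the lock
    };

    void publish(Slot& s, int cur_collection, std::size_t n_chunks) {
      s.array = new int[n_chunks]();                                   // initialize payload first
      s.generation.store(cur_collection, std::memory_order_release);   // then publish the guard
    }

    int* consume(Slot& s, int cur_collection) {
      // The acquire load pairs with the release store: if the guard matches,
      // the payload initialization is guaranteed to be visible as well.
      if (s.generation.load(std::memory_order_acquire) == cur_collection) {
        return s.array;
      }
      return nullptr;
    }

Only the guard needs to carry the ordering; the payload itself requires no atomicity because, as in the patch, it is only ever written while holding ParGCRareEvent_lock.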