
src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp

rev 8070 : imported patch gencollected_heap_cleanup


Old version (before the patch):

  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_interface/collectedHeap.hpp"
  27 #include "memory/allocation.inline.hpp"
  28 #include "memory/cardTableModRefBS.hpp"
  29 #include "memory/cardTableRS.hpp"
  30 #include "memory/space.inline.hpp"
  31 #include "memory/universe.hpp"
  32 #include "oops/oop.inline.hpp"
  33 #include "runtime/java.hpp"
  34 #include "runtime/mutexLocker.hpp"
  35 #include "runtime/orderAccess.inline.hpp"
  36 #include "runtime/virtualspace.hpp"
  37 #include "runtime/vmThread.hpp"
  38 
  39 void CardTableModRefBS::non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
  40                                                              OopsInGenClosure* cl,
  41                                                              CardTableRS* ct,
  42                                                              int n_threads) {
  43   assert(n_threads > 0, "Error: expected n_threads > 0");
  44   assert((n_threads == 1 && ParallelGCThreads == 0) ||
  45          n_threads <= (int)ParallelGCThreads,
  46          "# worker threads != # requested!");
  47   assert(!Thread::current()->is_VM_thread() || (n_threads == 1), "There is only 1 VM thread");
  48   assert(UseDynamicNumberOfGCThreads ||
  49          !FLAG_IS_DEFAULT(ParallelGCThreads) ||
  50          n_threads == (int)ParallelGCThreads,
  51          "# worker threads != # requested!");


 432                         uintptr_t& lowest_non_clean_base_chunk_index,
 433                         size_t& lowest_non_clean_chunk_size) {
 434 
 435   int       i        = find_covering_region_containing(sp->bottom());
 436   MemRegion covered  = _covered[i];
 437   size_t    n_chunks = chunks_to_cover(covered);
 438 
 439   // Only the first thread to obtain the lock will resize the
 440   // LNC array for the covered region.  Any later expansion can't affect
 441   // the used_at_save_marks region.
 442   // (I observed a bug in which the first thread to execute this would
 443   // resize, and then it would cause "expand_and_allocate" that would
 444   // increase the number of chunks in the covered region.  Then a second
 445   // thread would come and execute this, see that the size didn't match,
 446   // and free and allocate again.  So the first thread would be using a
 447   // freed "_lowest_non_clean" array.)
 448 
 449   // Do a dirty read here. If we pass the conditional then take the rare
 450   // event lock and do the read again in case some other thread had already
 451   // succeeded and done the resize.
 452   int cur_collection = Universe::heap()->total_collections();
 453   if (_last_LNC_resizing_collection[i] != cur_collection) {
 454     MutexLocker x(ParGCRareEvent_lock);
 455     if (_last_LNC_resizing_collection[i] != cur_collection) {
 456       if (_lowest_non_clean[i] == NULL ||
 457           n_chunks != _lowest_non_clean_chunk_size[i]) {
 458 
 459         // Should we delete the old?
 460         if (_lowest_non_clean[i] != NULL) {
 461           assert(n_chunks != _lowest_non_clean_chunk_size[i],
 462                  "logical consequence");
 463           FREE_C_HEAP_ARRAY(CardPtr, _lowest_non_clean[i]);
 464           _lowest_non_clean[i] = NULL;
 465         }
 466         // Now allocate a new one if necessary.
 467         if (_lowest_non_clean[i] == NULL) {
 468           _lowest_non_clean[i]                  = NEW_C_HEAP_ARRAY(CardPtr, n_chunks, mtGC);
 469           _lowest_non_clean_chunk_size[i]       = n_chunks;
 470           _lowest_non_clean_base_chunk_index[i] = addr_to_chunk_index(covered.start());
 471           for (int j = 0; j < (int)n_chunks; j++)
 472             _lowest_non_clean[i][j] = NULL;


New version (rev 8070, gencollected_heap_cleanup):

  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_interface/collectedHeap.hpp"
  27 #include "memory/allocation.inline.hpp"
  28 #include "memory/cardTableModRefBS.hpp"
  29 #include "memory/cardTableRS.hpp"
  30 #include "memory/genCollectedHeap.hpp"
  31 #include "memory/space.inline.hpp"
  32 #include "oops/oop.inline.hpp"
  33 #include "runtime/java.hpp"
  34 #include "runtime/mutexLocker.hpp"
  35 #include "runtime/orderAccess.inline.hpp"
  36 #include "runtime/virtualspace.hpp"
  37 #include "runtime/vmThread.hpp"
  38 
  39 void CardTableModRefBS::non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
  40                                                              OopsInGenClosure* cl,
  41                                                              CardTableRS* ct,
  42                                                              int n_threads) {
  43   assert(n_threads > 0, "Error: expected n_threads > 0");
  44   assert((n_threads == 1 && ParallelGCThreads == 0) ||
  45          n_threads <= (int)ParallelGCThreads,
  46          "# worker threads != # requested!");
  47   assert(!Thread::current()->is_VM_thread() || (n_threads == 1), "There is only 1 VM thread");
  48   assert(UseDynamicNumberOfGCThreads ||
  49          !FLAG_IS_DEFAULT(ParallelGCThreads) ||
  50          n_threads == (int)ParallelGCThreads,
  51          "# worker threads != # requested!");


 432                         uintptr_t& lowest_non_clean_base_chunk_index,
 433                         size_t& lowest_non_clean_chunk_size) {
 434 
 435   int       i        = find_covering_region_containing(sp->bottom());
 436   MemRegion covered  = _covered[i];
 437   size_t    n_chunks = chunks_to_cover(covered);
 438 
 439   // Only the first thread to obtain the lock will resize the
 440   // LNC array for the covered region.  Any later expansion can't affect
 441   // the used_at_save_marks region.
 442   // (I observed a bug in which the first thread to execute this would
 443   // resize, and then it would cause "expand_and_allocate" that would
 444   // increase the number of chunks in the covered region.  Then a second
 445   // thread would come and execute this, see that the size didn't match,
 446   // and free and allocate again.  So the first thread would be using a
 447   // freed "_lowest_non_clean" array.)
 448 
 449   // Do a dirty read here. If we pass the conditional then take the rare
 450   // event lock and do the read again in case some other thread had already
 451   // succeeded and done the resize.
 452   int cur_collection = GenCollectedHeap::heap()->total_collections();
 453   if (_last_LNC_resizing_collection[i] != cur_collection) {
 454     MutexLocker x(ParGCRareEvent_lock);
 455     if (_last_LNC_resizing_collection[i] != cur_collection) {
 456       if (_lowest_non_clean[i] == NULL ||
 457           n_chunks != _lowest_non_clean_chunk_size[i]) {
 458 
 459         // Should we delete the old?
 460         if (_lowest_non_clean[i] != NULL) {
 461           assert(n_chunks != _lowest_non_clean_chunk_size[i],
 462                  "logical consequence");
 463           FREE_C_HEAP_ARRAY(CardPtr, _lowest_non_clean[i]);
 464           _lowest_non_clean[i] = NULL;
 465         }
 466         // Now allocate a new one if necessary.
 467         if (_lowest_non_clean[i] == NULL) {
 468           _lowest_non_clean[i]                  = NEW_C_HEAP_ARRAY(CardPtr, n_chunks, mtGC);
 469           _lowest_non_clean_chunk_size[i]       = n_chunks;
 470           _lowest_non_clean_base_chunk_index[i] = addr_to_chunk_index(covered.start());
 471           for (int j = 0; j < (int)n_chunks; j++)
 472             _lowest_non_clean[i][j] = NULL;
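
The resize logic above is a double-checked locking idiom: the unlocked
"dirty" read of _last_LNC_resizing_collection[i] filters out the common
case, and only a thread that sees a stale value takes ParGCRareEvent_lock
and re-checks before freeing and reallocating the array. That re-check is
what closes the race described in the comment, where a second thread could
free a "_lowest_non_clean" array the first thread was still using. Within
these hunks the patch only swaps the heap accessor (Universe::heap() ->
GenCollectedHeap::heap(), plus the matching include); the resize pattern
itself is unchanged. Below is a minimal standalone sketch of the same
pattern, using std::mutex and std::atomic in place of HotSpot's MutexLocker
and its memory-access conventions; all names in the sketch are hypothetical:

#include <atomic>
#include <cstddef>
#include <mutex>

// Hypothetical stand-ins for the per-region state guarded above. In
// standard C++ the collection stamp must be atomic for the unlocked
// first read to be well-defined.
static std::mutex       rare_event_lock;   // role of ParGCRareEvent_lock
static std::atomic<int> last_resizing_collection{-1};
static void**           lowest_non_clean      = nullptr;
static size_t           lowest_non_clean_size = 0;

static void ensure_lnc_array(int cur_collection, size_t n_chunks) {
  // Dirty read, no lock: in the common case the array was already
  // resized for this collection and we return immediately.
  if (last_resizing_collection.load(std::memory_order_acquire) == cur_collection)
    return;

  std::lock_guard<std::mutex> guard(rare_event_lock);
  // Re-check under the lock: another thread may have completed the
  // resize between our dirty read and our acquiring the lock.
  if (last_resizing_collection.load(std::memory_order_relaxed) == cur_collection)
    return;

  // Free and reallocate only when the chunk count actually changed.
  if (lowest_non_clean == nullptr || n_chunks != lowest_non_clean_size) {
    delete[] lowest_non_clean;                      // no-op on nullptr
    lowest_non_clean      = new void*[n_chunks]();  // zero-initialized
    lowest_non_clean_size = n_chunks;
  }
  // Publish the stamp last: a dirty reader that sees the current
  // collection id must also see the fully initialized array (the
  // release store pairs with the acquire load above).
  last_resizing_collection.store(cur_collection, std::memory_order_release);
}

Publishing the stamp only after the array is consistent is the essential
ordering; setting it before the reallocation would reintroduce the
freed-array bug the original comment describes.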