
src/share/vm/gc/cms/compactibleFreeListSpace.cpp

Old version:
  47 
  48 /////////////////////////////////////////////////////////////////////////
  49 //// CompactibleFreeListSpace
  50 /////////////////////////////////////////////////////////////////////////
  51 
  52 // Highest rank used by the free list locks
  53 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
  54 
  55 // Defaults are 0 so things will break badly if incorrectly initialized.
  56 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
  57 size_t CompactibleFreeListSpace::IndexSetStride = 0;
  58 
  59 size_t MinChunkSize = 0;
  60 
  61 void CompactibleFreeListSpace::set_cms_values() {
  62   // Set CMS global values
  63   assert(MinChunkSize == 0, "already set");
  64 
  65   // MinChunkSize should be a multiple of MinObjAlignment and be large enough
  66   // for chunks to contain a FreeChunk.
  67   size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
  68   MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
  69 
  70   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  71   IndexSetStart  = MinChunkSize;
  72   IndexSetStride = MinObjAlignment;
  73 }
  74 
  75 // Constructor
  76 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr) :
  77   _bt(bs, mr),
  78   // Free list locks are in the range of values taken by _lockRank.
  79   // This range currently is [_leaf+2, _leaf+3].
  80   // Note: this requires that CFLspace c'tors
  81   // are called serially in the order in which the locks
  82   // are acquired in the program text. This is true today.
  83   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true,
  84                 Monitor::_safepoint_check_sometimes),
  85   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
  86                           "CompactibleFreeListSpace._dict_par_lock", true,
  87                           Monitor::_safepoint_check_never),


2856   pst->set_n_tasks((int)n_tasks);
2857 }
2858 
2859 // Set up the space's par_seq_tasks structure for work claiming
2860 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
2861 void
2862 CompactibleFreeListSpace::
2863 initialize_sequential_subtasks_for_marking(int n_threads,
2864                                            HeapWord* low) {
2865   // The "size" of each task is fixed according to marking_task_size.
2866   assert(n_threads > 0, "Unexpected n_threads argument");
2867   const size_t task_size = marking_task_size();
2868   assert(task_size > CardTableModRefBS::card_size_in_words &&
2869          (task_size % CardTableModRefBS::card_size_in_words == 0),
2870          "Otherwise arithmetic below would be incorrect");
2871   MemRegion span = _old_gen->reserved();
2872   if (low != NULL) {
2873     if (span.contains(low)) {
2874     // Align low down to a card boundary so that
2875       // we can use block_offset_careful() on span boundaries.
2876       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
2877                                  CardTableModRefBS::card_size);
2878       // Clip span prefix at aligned_low
2879       span = span.intersection(MemRegion(aligned_low, span.end()));
2880     } else if (low > span.end()) {
2881       span = MemRegion(low, low);  // Null region
2882     } // else use entire span
2883   }
2884   assert(span.is_empty() ||
2885          ((uintptr_t)span.start() % CardTableModRefBS::card_size == 0),
2886          "span should start at a card boundary");
2887   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
2888   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
2889   assert(n_tasks == 0 ||
2890          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
2891           (span.start() + n_tasks*task_size >= span.end())),
2892          "n_tasks calculation incorrect");
2893   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2894   assert(!pst->valid(), "Clobbering existing data?");
2895   // Sets the condition for completion of the subtask (how many threads
2896   // need to finish in order to be done).
2897   pst->set_n_threads(n_threads);

New version:
  47 
  48 /////////////////////////////////////////////////////////////////////////
  49 //// CompactibleFreeListSpace
  50 /////////////////////////////////////////////////////////////////////////
  51 
  52 // Highest rank used by the free list locks
  53 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
  54 
  55 // Defaults are 0 so things will break badly if incorrectly initialized.
  56 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
  57 size_t CompactibleFreeListSpace::IndexSetStride = 0;
  58 
  59 size_t MinChunkSize = 0;
  60 
  61 void CompactibleFreeListSpace::set_cms_values() {
  62   // Set CMS global values
  63   assert(MinChunkSize == 0, "already set");
  64 
  65   // MinChunkSize should be a multiple of MinObjAlignment and be large enough
  66   // for chunks to contain a FreeChunk.
  67   size_t min_chunk_size_in_bytes = align_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
  68   MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
  69 
  70   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  71   IndexSetStart  = MinChunkSize;
  72   IndexSetStride = MinObjAlignment;
  73 }
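As a side note on the renamed helper: align_up here rounds sizeof(FreeChunk) up to the next multiple of the object alignment before converting bytes to words. A minimal standalone sketch of that arithmetic, assuming the usual power-of-two bitmask implementation; sketch_align_up and the 12-byte chunk size are illustrative, not HotSpot's actual definitions or values:

    // Standalone sketch; sketch_align_up mirrors what a power-of-two
    // align_up does, it is not HotSpot's definition.
    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    static size_t sketch_align_up(size_t value, size_t alignment) {
      // The mask trick requires a power-of-two alignment.
      assert((alignment & (alignment - 1)) == 0);
      return (value + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      // Illustrative numbers: a 12-byte FreeChunk and 8-byte alignment
      // round up to 16 bytes, i.e. 2 words when BytesPerWord == 8.
      size_t min_chunk_size_in_bytes = sketch_align_up(12, 8);
      printf("%zu bytes -> %zu words\n",
             min_chunk_size_in_bytes, min_chunk_size_in_bytes / 8);
      return 0;
    }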
  74 
  75 // Constructor
  76 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr) :
  77   _bt(bs, mr),
  78   // Free list locks are in the range of values taken by _lockRank.
  79   // This range currently is [_leaf+2, _leaf+3].
  80   // Note: this requires that CFLspace c'tors
  81   // are called serially in the order in which the locks
  82   // are acquired in the program text. This is true today.
  83   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true,
  84                 Monitor::_safepoint_check_sometimes),
  85   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
  86                           "CompactibleFreeListSpace._dict_par_lock", true,
  87                           Monitor::_safepoint_check_never),
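The post-decrement in _freelistLock(_lockRank--, ...) is what makes the serial-construction requirement above matter: each space constructed takes the current rank and lowers it for the next one, so ranks are assigned in construction order. A toy sketch of that scheme, where the leaf constant and SpaceSketch type are made-up stand-ins rather than HotSpot's Mutex machinery:

    // Toy model of the descending static lock-rank scheme.
    #include <cstdio>

    static const int leaf = 10;        // stand-in for Mutex::leaf
    static int lock_rank = leaf + 3;   // like CompactibleFreeListSpace::_lockRank

    struct SpaceSketch {
      int rank;
      SpaceSketch() : rank(lock_rank--) {}  // like _freelistLock(_lockRank--, ...)
    };

    int main() {
      SpaceSketch first;   // gets leaf + 3
      SpaceSketch second;  // gets leaf + 2, hence the [_leaf+2, _leaf+3] range
      printf("first=%d second=%d\n", first.rank, second.rank);
      return 0;
    }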


2856   pst->set_n_tasks((int)n_tasks);
2857 }
2858 
2859 // Set up the space's par_seq_tasks structure for work claiming
2860 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
2861 void
2862 CompactibleFreeListSpace::
2863 initialize_sequential_subtasks_for_marking(int n_threads,
2864                                            HeapWord* low) {
2865   // The "size" of each task is fixed according to marking_task_size.
2866   assert(n_threads > 0, "Unexpected n_threads argument");
2867   const size_t task_size = marking_task_size();
2868   assert(task_size > CardTableModRefBS::card_size_in_words &&
2869          (task_size % CardTableModRefBS::card_size_in_words == 0),
2870          "Otherwise arithmetic below would be incorrect");
2871   MemRegion span = _old_gen->reserved();
2872   if (low != NULL) {
2873     if (span.contains(low)) {
2874     // Align low down to a card boundary so that
2875       // we can use block_offset_careful() on span boundaries.
2876       HeapWord* aligned_low = align_down(low, CardTableModRefBS::card_size);
2877       // Clip span prefix at aligned_low
2878       span = span.intersection(MemRegion(aligned_low, span.end()));
2879     } else if (low > span.end()) {
2880       span = MemRegion(low, low);  // Null region
2881     } // else use entire span
2882   }
2883   assert(span.is_empty() ||
2884          ((uintptr_t)span.start() % CardTableModRefBS::card_size == 0),
2885          "span should start at a card boundary");
2886   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
2887   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
2888   assert(n_tasks == 0 ||
2889          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
2890           (span.start() + n_tasks*task_size >= span.end())),
2891          "n_tasks calculation incorrect");
2892   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2893   assert(!pst->valid(), "Clobbering existing data?");
2894   // Sets the condition for completion of the subtask (how many threads
2895   // need to finish in order to be done).
2896   pst->set_n_threads(n_threads);
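To see the card-boundary clipping and the ceiling division at work, here is a standalone sketch with made-up card size, task size, and span bounds; it checks the same invariants the asserts above encode:

    // Standalone sketch of the span clipping and n_tasks arithmetic;
    // all constants are illustrative, not HotSpot's actual values.
    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const uintptr_t card_size = 512;   // bytes per card (example)
      const size_t task_size = 4096;     // words per task; a multiple of the
                                         // card size in words (512/8 = 64)
      // Example reserved span [0x10000, 0x50000) and low = 0x12345.
      uintptr_t span_start = 0x10000, span_end = 0x50000;
      uintptr_t low = 0x12345;
      // Align low down to a card boundary, then clip the span prefix.
      uintptr_t aligned_low = low & ~(card_size - 1);   // 0x12200
      if (aligned_low > span_start) span_start = aligned_low;
      size_t span_words = (span_end - span_start) / 8;  // 8-byte HeapWords
      // Ceiling division: just enough tasks to cover the span.
      size_t n_tasks = (span_words + task_size - 1) / task_size;
      // Same invariants as the asserts in the source above.
      assert(n_tasks * task_size >= span_words);
      assert(n_tasks == 0 || (n_tasks - 1) * task_size < span_words);
      printf("aligned_low=0x%llx n_tasks=%zu\n",
             (unsigned long long)aligned_low, n_tasks);
      return 0;
    }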