< prev index next >

src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp

Print this page




4724         end   = space->top();
4725       } else if (nth_task == 0) {
4726         start = space->bottom();
4727         end   = chunk_array[nth_task];
4728       } else if (nth_task < (uint)chunk_top) {
4729         assert(nth_task >= 1, "Control point invariant");
4730         start = chunk_array[nth_task - 1];
4731         end   = chunk_array[nth_task];
4732       } else {
4733         assert(nth_task == (uint)chunk_top, "Control point invariant");
4734         start = chunk_array[chunk_top - 1];
4735         end   = space->top();
4736       }
4737       MemRegion mr(start, end);
4738       // Verify that mr is in space
4739       assert(mr.is_empty() || space->used_region().contains(mr),
4740              "Should be in space");
4741       // Verify that "start" is an object boundary
4742       assert(mr.is_empty() || oop(mr.start())->is_oop(),
4743              "Should be an oop");







4744       space->par_oop_iterate(mr, cl);
4745     }
4746     pst->all_tasks_completed();
4747   }
4748 }
4749 
4750 void
4751 CMSParRemarkTask::do_dirty_card_rescan_tasks(
4752   CompactibleFreeListSpace* sp, int i,
4753   Par_MarkRefsIntoAndScanClosure* cl) {
4754   // Until all tasks completed:
4755   // . claim an unclaimed task
4756   // . compute region boundaries corresponding to task claimed
4757   // . transfer dirty bits ct->mut for that region
4758   // . apply rescanclosure to dirty mut bits for that region
4759 
4760   ResourceMark rm;
4761   HandleMark   hm;
4762 
4763   OopTaskQueue* work_q = work_queue(i);


// Reset the per-GC-worker survivor PLAB chunk arrays so that a fresh
// round of survivor-space chunk recording can begin.
4938 void CMSCollector::reset_survivor_plab_arrays() {
4939   for (uint i = 0; i < ParallelGCThreads; i++) {   // one ChunkArray per parallel GC thread
4940     _survivor_plab_array[i].reset();               // discard previously recorded PLAB boundaries
4941   }
4942 }
4943 
4944 // Merge the per-thread plab arrays into the global survivor chunk
4945 // array which will provide the partitioning of the survivor space
4946 // for CMS initial scan and rescan.
// Implementation: an N-way merge.  Each worker's _survivor_plab_array is
// consumed in index order via _cursor[], and on every round the smallest
// unconsumed address across all workers is appended to
// _survivor_chunk_array.  This presumes each per-thread array is already
// sorted by address — TODO confirm against the recording side; the
// sortedness of the merged result is checked under ASSERT below.
4947 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
4948                                               int no_of_gc_threads) {
4949   assert(_survivor_plab_array  != NULL, "Error");
4950   assert(_survivor_chunk_array != NULL, "Error");
4951   assert(_collectorState == FinalMarking ||
4952          (CMSParallelInitialMarkEnabled && _collectorState == InitialMarking), "Error");
4953   for (int j = 0; j < no_of_gc_threads; j++) {
4954     _cursor[j] = 0;                   // rewind every per-thread array
4955   }
4956   HeapWord* top = surv->top();        // sentinel: above any recorded address
4957   size_t i;




4958   for (i = 0; i < _survivor_chunk_capacity; i++) {  // all sca entries
4959     HeapWord* min_val = top;          // Higher than any PLAB address
4960     uint      min_tid = 0;            // position of min_val this round
4961     for (int j = 0; j < no_of_gc_threads; j++) {
4962       ChunkArray* cur_sca = &_survivor_plab_array[j];
4963       if (_cursor[j] == cur_sca->end()) {
4964         continue;                     // thread j's entries are exhausted
4965       }
4966       assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
4967       HeapWord* cur_val = cur_sca->nth(_cursor[j]);
4968       assert(surv->used_region().contains(cur_val), "Out of bounds value");
4969       if (cur_val < min_val) {
4970         min_tid = j;
4971         min_val = cur_val;
4972       } else {
4973         assert(cur_val < top, "All recorded addresses should be less");
4974       }
4975     }
4976     // At this point min_val and min_tid are respectively
4977     // the least address in _survivor_plab_array[j]->nth(_cursor[j])
4978     // and the thread (j) that witnesses that address.
4979     // We record this address in the _survivor_chunk_array[i]
4980     // and increment _cursor[min_tid] prior to the next round i.
4981     if (min_val == top) {
4982       break;                          // no candidate found: all arrays exhausted
4983     }
4984     _survivor_chunk_array[i] = min_val;
4985     _cursor[min_tid]++;               // consume the entry we just merged
4986   }
4987   // We are all done; record the size of the _survivor_chunk_array
4988   _survivor_chunk_index = i; // exclusive: [0, i)
4989   if (PrintCMSStatistics > 0) {
4990     gclog_or_tty->print(" (Survivor:" SIZE_FORMAT "chunks) ", i);
4991   }
4992   // Verify that we used up all the recorded entries
4993   #ifdef ASSERT
4994     size_t total = 0;
4995     for (int j = 0; j < no_of_gc_threads; j++) {
       // Every per-thread array must be fully consumed by the merge.
4996       assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");



4997       total += _cursor[j];
4998     }
       // Exactly one merged chunk per consumed entry.
4999     assert(total == _survivor_chunk_index, "Ctl Pt Invariant");


5000     // Check that the merged array is in sorted order
5001     if (total > 0) {
5002       for (size_t i = 0; i < total - 1; i++) {
5003         if (PrintCMSStatistics > 0) {
5004           gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
5005                               i, p2i(_survivor_chunk_array[i]));
5006         }
5007         assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
5008                "Not sorted");
5009       }
5010     }
5011   #endif // ASSERT
5012 }
5013 
5014 // Set up the space's par_seq_tasks structure for work claiming
5015 // for parallel initial scan and rescan of young gen.
5016 // See ParRescanTask where this is currently used.
5017 void
5018 CMSCollector::
5019 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
5020   assert(n_threads > 0, "Unexpected n_threads argument");
5021 
5022   // Eden space




4724         end   = space->top();
4725       } else if (nth_task == 0) {
4726         start = space->bottom();
4727         end   = chunk_array[nth_task];
4728       } else if (nth_task < (uint)chunk_top) {
4729         assert(nth_task >= 1, "Control point invariant");
4730         start = chunk_array[nth_task - 1];
4731         end   = chunk_array[nth_task];
4732       } else {
4733         assert(nth_task == (uint)chunk_top, "Control point invariant");
4734         start = chunk_array[chunk_top - 1];
4735         end   = space->top();
4736       }
4737       MemRegion mr(start, end);
4738       // Verify that mr is in space
4739       assert(mr.is_empty() || space->used_region().contains(mr),
4740              "Should be in space");
4741       // Verify that "start" is an object boundary
4742       assert(mr.is_empty() || oop(mr.start())->is_oop(),
4743              "Should be an oop");
4744       // Inspect that there is good distribution of rescan tasks
4745       #ifdef ASSERT
4746         if (PrintCMSStatistics != 0) {
4747           tty->print_cr("rescan task=" UINT32_FORMAT "/" UINT32_FORMAT ", " PTR_FORMAT "-" PTR_FORMAT ", size=" SIZE_FORMAT".",
4748                         nth_task, n_tasks, p2i(mr.start()), p2i(mr.end()), mr.word_size());
4749         }
4750       #endif
4751       space->par_oop_iterate(mr, cl);
4752     }
4753     pst->all_tasks_completed();
4754   }
4755 }
4756 
4757 void
4758 CMSParRemarkTask::do_dirty_card_rescan_tasks(
4759   CompactibleFreeListSpace* sp, int i,
4760   Par_MarkRefsIntoAndScanClosure* cl) {
4761   // Until all tasks completed:
4762   // . claim an unclaimed task
4763   // . compute region boundaries corresponding to task claimed
4764   // . transfer dirty bits ct->mut for that region
4765   // . apply rescanclosure to dirty mut bits for that region
4766 
4767   ResourceMark rm;
4768   HandleMark   hm;
4769 
4770   OopTaskQueue* work_q = work_queue(i);


// Reset the per-GC-worker survivor PLAB chunk arrays so that a fresh
// round of survivor-space chunk recording can begin.
4945 void CMSCollector::reset_survivor_plab_arrays() {
4946   for (uint i = 0; i < ParallelGCThreads; i++) {   // one ChunkArray per parallel GC thread
4947     _survivor_plab_array[i].reset();               // discard previously recorded PLAB boundaries
4948   }
4949 }
4950 
4951 // Merge the per-thread plab arrays into the global survivor chunk
4952 // array which will provide the partitioning of the survivor space
4953 // for CMS initial scan and rescan.
// Implementation: an N-way merge of the per-thread (address-sorted —
// TODO confirm against the recording side) PLAB arrays, advanced via
// _cursor[].  Unlike a plain merge, the winning cursor is bumped by
// 'stride' each round, so only every stride-th recorded boundary ends up
// in _survivor_chunk_array: a coarser but more evenly sized partition
// for the parallel scan phase.
4954 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
4955                                               int no_of_gc_threads) {
4956   assert(_survivor_plab_array  != NULL, "Error");
4957   assert(_survivor_chunk_array != NULL, "Error");
4958   assert(_collectorState == FinalMarking ||
4959          (CMSParallelInitialMarkEnabled && _collectorState == InitialMarking), "Error");
4960   for (int j = 0; j < no_of_gc_threads; j++) {
4961     _cursor[j] = 0;                   // rewind every per-thread array
4962   }
4963   HeapWord* top = surv->top();        // sentinel: above any recorded address
4964   size_t i;
4965   // Stride through the _survivor_plab_arrays based on the number of gc threads so
4966   // the whole structure is scanned regardless of the min plab size. This will
4967   // produce a more even distribution of scan tasks for the parallel phase.
4968   size_t stride = ParallelGCThreads > 4 ? ParallelGCThreads/2 : ParallelGCThreads;
4969   for (i = 0; i < _survivor_chunk_capacity; i++) {  // all sca entries
4970     HeapWord* min_val = top;          // Higher than any PLAB address
4971     uint      min_tid = 0;            // position of min_val this round
4972     for (int j = 0; j < no_of_gc_threads; j++) {
4973       ChunkArray* cur_sca = &_survivor_plab_array[j];
      // '>=' (not '=='): the stride bump below can push a cursor past
      // end() by up to stride - 1 entries.
4974       if (_cursor[j] >= cur_sca->end()) {
4975         continue;                     // thread j's entries are exhausted
4976       }
4977       assert(_cursor[j] < (cur_sca->end()), "ctl pt invariant");
4978       HeapWord* cur_val = cur_sca->nth(_cursor[j]);
4979       assert(surv->used_region().contains(cur_val), "Out of bounds value");
4980       if (cur_val < min_val) {
4981         min_tid = j;
4982         min_val = cur_val;
4983       } else {
4984         assert(cur_val < top, "All recorded addresses should be less");
4985       }
4986     }
4987     // At this point min_val and min_tid are respectively
4988     // the least address in _survivor_plab_array[j]->nth(_cursor[j])
4989     // and the thread (j) that witnesses that address.
4990     // We record this address in the _survivor_chunk_array[i]
4991     // and increment _cursor[min_tid] prior to the next round i.
4992     if (min_val == top) {
4993       break;                          // no candidate found: all arrays exhausted
4994     }
4995     _survivor_chunk_array[i] = min_val;
4996     _cursor[min_tid] += stride;       // skip ahead: record every stride-th boundary
4997   }
4998   // We are all done; record the size of the _survivor_chunk_array
4999   _survivor_chunk_index = i; // exclusive: [0, i)
5000   if (PrintCMSStatistics > 0) {
5001     gclog_or_tty->print(" (Survivor:" SIZE_FORMAT "chunks) ", i);
5002   }
5003   // Verify that we used up all the recorded entries
5004   #ifdef ASSERT
5005     size_t total = 0;
5006     for (int j = 0; j < no_of_gc_threads; j++) {
      // With striding, each cursor ends in [end(), end() + stride): it was
      // advanced from a value below end() in increments of stride.
5007       assert(_cursor[j] >= _survivor_plab_array[j].end() && _cursor[j] < (_survivor_plab_array[j].end() + stride) ,
5008              err_msg("Did not use all entries of thread  " INT32_FORMAT ": _cursor[]=" SIZE_FORMAT
5009                       " != survivor_plab_array[]=" SIZE_FORMAT,
5010                       j, _cursor[j], _survivor_plab_array[j].end()));
5011       total += _cursor[j];
5012     }
      // Exactly one stride is consumed per recorded chunk.
5013     assert(total == (_survivor_chunk_index * stride),
5014            err_msg("Survivor stride error: total=" SIZE_FORMAT ": _survivor_chunk_index=" SIZE_FORMAT,
5015                    total, _survivor_chunk_index));
5016     // Check that the merged array is in sorted order
5017     if (_survivor_chunk_index > 0) {
5018       for (size_t i = 0; i < _survivor_chunk_index - 1; i++) {
5019         if (PrintCMSStatistics > 0) {
5020           gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
5021                               i, p2i(_survivor_chunk_array[i]));
5022         }
5023         assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
5024                "Not sorted");
5025       }
5026     }
5027   #endif // ASSERT
5028 }
5029 
5030 // Set up the space's par_seq_tasks structure for work claiming
5031 // for parallel initial scan and rescan of young gen.
5032 // See ParRescanTask where this is currently used.
5033 void
5034 CMSCollector::
5035 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
5036   assert(n_threads > 0, "Unexpected n_threads argument");
5037 
5038   // Eden space


< prev index next >