src/share/vm/gc/parallel/psParallelCompact.cpp

  32 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
  33 #include "gc/parallel/psCompactionManager.inline.hpp"
  34 #include "gc/parallel/psMarkSweep.hpp"
  35 #include "gc/parallel/psMarkSweepDecorator.hpp"
  36 #include "gc/parallel/psOldGen.hpp"
  37 #include "gc/parallel/psParallelCompact.inline.hpp"
  38 #include "gc/parallel/psPromotionManager.inline.hpp"
  39 #include "gc/parallel/psScavenge.hpp"
  40 #include "gc/parallel/psYoungGen.hpp"
  41 #include "gc/shared/gcCause.hpp"
  42 #include "gc/shared/gcHeapSummary.hpp"
  43 #include "gc/shared/gcId.hpp"
  44 #include "gc/shared/gcLocker.inline.hpp"
  45 #include "gc/shared/gcTimer.hpp"
  46 #include "gc/shared/gcTrace.hpp"
  47 #include "gc/shared/gcTraceTime.hpp"
  48 #include "gc/shared/isGCActiveMark.hpp"
  49 #include "gc/shared/referencePolicy.hpp"
  50 #include "gc/shared/referenceProcessor.hpp"
  51 #include "gc/shared/spaceDecorator.hpp"

  52 #include "oops/instanceKlass.inline.hpp"
  53 #include "oops/instanceMirrorKlass.inline.hpp"
  54 #include "oops/methodData.hpp"
  55 #include "oops/objArrayKlass.inline.hpp"
  56 #include "oops/oop.inline.hpp"
  57 #include "runtime/atomic.inline.hpp"
  58 #include "runtime/fprofiler.hpp"
  59 #include "runtime/safepoint.hpp"
  60 #include "runtime/vmThread.hpp"
  61 #include "services/management.hpp"
  62 #include "services/memTracker.hpp"
  63 #include "services/memoryService.hpp"
  64 #include "utilities/events.hpp"
  65 #include "utilities/stack.inline.hpp"
  66 
  67 #include <math.h>
  68 
  69 // All sizes are in HeapWords.
  70 const size_t ParallelCompactData::Log2RegionSize  = 16; // 64K words
  71 const size_t ParallelCompactData::RegionSize      = (size_t)1 << Log2RegionSize;
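
For scale: assuming the usual 8-byte HeapWord of a 64-bit build (an assumption, not stated in this file), one region is 2^16 = 65536 words, i.e. 512 KiB. A minimal standalone sketch of that arithmetic:

#include <cstddef>
#include <cstdio>

int main() {
  const size_t Log2RegionSize = 16;
  const size_t RegionSize     = (size_t)1 << Log2RegionSize;  // 65536 words
  const size_t HeapWordSize   = 8;                            // assumed: 64-bit build
  std::printf("region = %zu words = %zu KiB\n",
              RegionSize, RegionSize * HeapWordSize / 1024);  // 65536 words = 512 KiB
  return 0;
}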


  89 
  90 const ParallelCompactData::RegionData::region_sz_t
  91 ParallelCompactData::RegionData::dc_shift = 27;
  92 
  93 const ParallelCompactData::RegionData::region_sz_t
  94 ParallelCompactData::RegionData::dc_mask = ~0U << dc_shift;
  95 
  96 const ParallelCompactData::RegionData::region_sz_t
  97 ParallelCompactData::RegionData::dc_one = 0x1U << dc_shift;
  98 
  99 const ParallelCompactData::RegionData::region_sz_t
 100 ParallelCompactData::RegionData::los_mask = ~dc_mask;
 101 
 102 const ParallelCompactData::RegionData::region_sz_t
 103 ParallelCompactData::RegionData::dc_claimed = 0x8U << dc_shift;
 104 
 105 const ParallelCompactData::RegionData::region_sz_t
 106 ParallelCompactData::RegionData::dc_completed = 0xcU << dc_shift;
 107 
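
A minimal sketch (not part of this file) of how the constants above partition one 32-bit per-region word: the low 27 bits (los_mask) hold the live-object size, while the top five bits carry the destination count plus the claimed/completed state; dc_claimed and dc_completed are sentinel values well above any real count. The 32-bit typedef and the arithmetic below are assumptions based only on the constants shown.

#include <cassert>
#include <cstdint>

typedef uint32_t region_sz_t;                               // assumed width of the field

static const region_sz_t dc_shift     = 27;
static const region_sz_t dc_mask      = ~0U << dc_shift;    // top 5 bits
static const region_sz_t dc_one       = 0x1U << dc_shift;
static const region_sz_t los_mask     = ~dc_mask;           // low 27 bits
static const region_sz_t dc_claimed   = 0x8U << dc_shift;
static const region_sz_t dc_completed = 0xcU << dc_shift;

int main() {
  region_sz_t word = 0;
  word += 12345;                       // live-object size accumulates in the low bits
  word += dc_one;                      // record one destination region
  word += dc_one;                      // ... and a second one
  assert((word & los_mask) == 12345);              // the size is unaffected by the count
  assert((word & dc_mask) == 2 * dc_one);          // two destinations recorded
  assert(dc_completed == dc_claimed + 4 * dc_one); // completed = claimed + 4 in the top bits
  (void)word;
  return 0;
}
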
 108 SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];
 109 bool      PSParallelCompact::_print_phases = false;
 110 
 111 ReferenceProcessor* PSParallelCompact::_ref_processor = NULL;
 112 
 113 double PSParallelCompact::_dwl_mean;
 114 double PSParallelCompact::_dwl_std_dev;
 115 double PSParallelCompact::_dwl_first_term;
 116 double PSParallelCompact::_dwl_adjustment;
 117 #ifdef  ASSERT
 118 bool   PSParallelCompact::_dwl_initialized = false;
 119 #endif  // #ifdef ASSERT
 120 
 121 void SplitInfo::record(size_t src_region_idx, size_t partial_obj_size,
 122                        HeapWord* destination)
 123 {
 124   assert(src_region_idx != 0, "invalid src_region_idx");
 125   assert(partial_obj_size != 0, "invalid partial_obj_size argument");
 126   assert(destination != NULL, "invalid destination argument");
 127 
 128   _src_region_idx = src_region_idx;
 129   _partial_obj_size = partial_obj_size;


 176 {
 177   assert(_src_region_idx == 0, "not clear");
 178   assert(_partial_obj_size == 0, "not clear");
 179   assert(_destination == NULL, "not clear");
 180   assert(_destination_count == 0, "not clear");
 181   assert(_dest_region_addr == NULL, "not clear");
 182   assert(_first_src_addr == NULL, "not clear");
 183 }
 184 #endif  // #ifdef ASSERT
 185 
 186 
 187 void PSParallelCompact::print_on_error(outputStream* st) {
 188   _mark_bitmap.print_on_error(st);
 189 }
 190 
 191 #ifndef PRODUCT
 192 const char* PSParallelCompact::space_names[] = {
 193   "old ", "eden", "from", "to  "
 194 };
 195 
 196 void PSParallelCompact::print_region_ranges()
 197 {
 198   tty->print_cr("space  bottom     top        end        new_top");
 199   tty->print_cr("------ ---------- ---------- ---------- ----------");
 200 
 201   for (unsigned int id = 0; id < last_space_id; ++id) {
 202     const MutableSpace* space = _space_info[id].space();
 203     tty->print_cr("%u %s "
 204                   SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " "
 205                   SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " ",
 206                   id, space_names[id],
 207                   summary_data().addr_to_region_idx(space->bottom()),
 208                   summary_data().addr_to_region_idx(space->top()),
 209                   summary_data().addr_to_region_idx(space->end()),
 210                   summary_data().addr_to_region_idx(_space_info[id].new_top()));
 211   }
 212 }
 213 
 214 void
 215 print_generic_summary_region(size_t i, const ParallelCompactData::RegionData* c)
 216 {
 217 #define REGION_IDX_FORMAT        SIZE_FORMAT_W(7)
 218 #define REGION_DATA_FORMAT       SIZE_FORMAT_W(5)
 219 
 220   ParallelCompactData& sd = PSParallelCompact::summary_data();
 221   size_t dci = c->destination() ? sd.addr_to_region_idx(c->destination()) : 0;
 222   tty->print_cr(REGION_IDX_FORMAT " " PTR_FORMAT " "
 223                 REGION_IDX_FORMAT " " PTR_FORMAT " "
 224                 REGION_DATA_FORMAT " " REGION_DATA_FORMAT " "
 225                 REGION_DATA_FORMAT " " REGION_IDX_FORMAT " %d",
 226                 i, p2i(c->data_location()), dci, p2i(c->destination()),
 227                 c->partial_obj_size(), c->live_obj_size(),
 228                 c->data_size(), c->source_region(), c->destination_count());
 229 
 230 #undef  REGION_IDX_FORMAT
 231 #undef  REGION_DATA_FORMAT
 232 }
 233 
 234 void
 235 print_generic_summary_data(ParallelCompactData& summary_data,
 236                            HeapWord* const beg_addr,
 237                            HeapWord* const end_addr)
 238 {
 239   size_t total_words = 0;
 240   size_t i = summary_data.addr_to_region_idx(beg_addr);
 241   const size_t last = summary_data.addr_to_region_idx(end_addr);
 242   HeapWord* pdest = 0;
 243 
 244   while (i <= last) {
 245     ParallelCompactData::RegionData* c = summary_data.region(i);
 246     if (c->data_size() != 0 || c->destination() != pdest) {
 247       print_generic_summary_region(i, c);
 248       total_words += c->data_size();
 249       pdest = c->destination();
 250     }
 251     ++i;
 252   }
 253 
 254   tty->print_cr("summary_data_bytes=" SIZE_FORMAT, total_words * HeapWordSize);
 255 }
 256 
 257 void
 258 print_generic_summary_data(ParallelCompactData& summary_data,
 259                            SpaceInfo* space_info)
 260 {
 261   for (unsigned int id = 0; id < PSParallelCompact::last_space_id; ++id) {
 262     const MutableSpace* space = space_info[id].space();
 263     print_generic_summary_data(summary_data, space->bottom(),
 264                                MAX2(space->top(), space_info[id].new_top()));
 265   }
 266 }
 267 
 268 void
 269 print_initial_summary_region(size_t i,
 270                              const ParallelCompactData::RegionData* c,
 271                              bool newline = true)
 272 {
 273   tty->print(SIZE_FORMAT_W(5) " " PTR_FORMAT " "
 274              SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " "
 275              SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d",
 276              i, p2i(c->destination()),
 277              c->partial_obj_size(), c->live_obj_size(),
 278              c->data_size(), c->source_region(), c->destination_count());
 279   if (newline) tty->cr();
 280 }
 281 
 282 void
 283 print_initial_summary_data(ParallelCompactData& summary_data,
 284                            const MutableSpace* space) {
 285   if (space->top() == space->bottom()) {
 286     return;
 287   }
 288 
 289   const size_t region_size = ParallelCompactData::RegionSize;
 290   typedef ParallelCompactData::RegionData RegionData;
 291   HeapWord* const top_aligned_up = summary_data.region_align_up(space->top());
 292   const size_t end_region = summary_data.addr_to_region_idx(top_aligned_up);
 293   const RegionData* c = summary_data.region(end_region - 1);
 294   HeapWord* end_addr = c->destination() + c->data_size();
 295   const size_t live_in_space = pointer_delta(end_addr, space->bottom());
 296 
 297   // Print (and count) the full regions at the beginning of the space.
 298   size_t full_region_count = 0;
 299   size_t i = summary_data.addr_to_region_idx(space->bottom());
 300   while (i < end_region && summary_data.region(i)->data_size() == region_size) {
 301     print_initial_summary_region(i, summary_data.region(i));
 302     ++full_region_count;
 303     ++i;
 304   }
 305 
 306   size_t live_to_right = live_in_space - full_region_count * region_size;
 307 
 308   double max_reclaimed_ratio = 0.0;
 309   size_t max_reclaimed_ratio_region = 0;
 310   size_t max_dead_to_right = 0;
 311   size_t max_live_to_right = 0;
 312 
 313   // Print the 'reclaimed ratio' for regions while there is something live in
 314   // the region or to the right of it.  The remaining regions are empty (and
 315   // uninteresting), and computing the ratio will result in division by 0.
 316   while (i < end_region && live_to_right > 0) {
 317     c = summary_data.region(i);
 318     HeapWord* const region_addr = summary_data.region_to_addr(i);
 319     const size_t used_to_right = pointer_delta(space->top(), region_addr);
 320     const size_t dead_to_right = used_to_right - live_to_right;
 321     const double reclaimed_ratio = double(dead_to_right) / live_to_right;
 322 
 323     if (reclaimed_ratio > max_reclaimed_ratio) {
 324       max_reclaimed_ratio = reclaimed_ratio;
 325       max_reclaimed_ratio_region = i;
 326       max_dead_to_right = dead_to_right;
 327       max_live_to_right = live_to_right;
 328     }
 329 
 330     print_initial_summary_region(i, c, false);
 331     tty->print_cr(" %12.10f " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10),
 332                   reclaimed_ratio, dead_to_right, live_to_right);
 333 
 334     live_to_right -= c->data_size();
 335     ++i;
 336   }
 337 
 338   // Any remaining regions are empty.  Print one more if there is one.
 339   if (i < end_region) {
 340     print_initial_summary_region(i, summary_data.region(i));
 341   }
 342 
 343   tty->print_cr("max:  " SIZE_FORMAT_W(4) " d2r=" SIZE_FORMAT_W(10) " "
 344                 "l2r=" SIZE_FORMAT_W(10) " max_ratio=%14.12f",
 345                 max_reclaimed_ratio_region, max_dead_to_right,
 346                 max_live_to_right, max_reclaimed_ratio);
 347 }
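
A small worked example (the word counts are invented) of the reclaimed-ratio figure printed per region above: dead words to the right of the region divided by live words to the right, so a larger ratio means more space would be reclaimed relative to the data that must be moved.

#include <cstddef>
#include <cstdio>

int main() {
  // Invented figures, in heap words, for one candidate region boundary.
  const size_t used_to_right = 100000;  // space->top() - region_addr
  const size_t live_to_right = 25000;   // live words at or to the right of the region
  const size_t dead_to_right = used_to_right - live_to_right;             // 75000
  const double reclaimed_ratio = double(dead_to_right) / live_to_right;   // 3.0
  std::printf("dead=%zu live=%zu ratio=%.2f\n",
              dead_to_right, live_to_right, reclaimed_ratio);
  return 0;
}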
 348 
 349 void
 350 print_initial_summary_data(ParallelCompactData& summary_data,
 351                            SpaceInfo* space_info) {
 352   unsigned int id = PSParallelCompact::old_space_id;
 353   const MutableSpace* space;
 354   do {
 355     space = space_info[id].space();
 356     print_initial_summary_data(summary_data, space);
 357   } while (++id < PSParallelCompact::eden_space_id);
 358 
 359   do {
 360     space = space_info[id].space();
 361     print_generic_summary_data(summary_data, space->bottom(), space->top());
 362   } while (++id < PSParallelCompact::last_space_id);
 363 }
 364 #endif  // #ifndef PRODUCT
 365 
 366 #ifdef  ASSERT
 367 size_t add_obj_count;
 368 size_t add_obj_size;
 369 size_t mark_bitmap_count;
 370 size_t mark_bitmap_size;
 371 #endif  // #ifdef ASSERT


 589     // implies a region must be filled).
 590     //
 591     // An alternative to the simple loop below:  clear during post_compact(),
 592     // which uses memcpy instead of individual stores, and is easy to
 593     // parallelize.  (The downside is that it clears the entire RegionData
 594     // object as opposed to just one field.)
 595     //
 596     // post_compact() would have to clear the summary data up to the highest
 597     // address that was written during the summary phase, which would be
 598     //
 599     //         max(top, max(new_top, clear_top))
 600     //
 601     // where clear_top is a new field in SpaceInfo.  Would have to set clear_top
 602     // to target_end.
 603     const RegionData* const sr = region(split_region);
 604     const size_t beg_idx =
 605       addr_to_region_idx(region_align_up(sr->destination() +
 606                                          sr->partial_obj_size()));
 607     const size_t end_idx = addr_to_region_idx(target_end);
 608 
 609     if (TraceParallelOldGCSummaryPhase) {
 610         gclog_or_tty->print_cr("split:  clearing source_region field in ["
 611                                SIZE_FORMAT ", " SIZE_FORMAT ")",
 612                                beg_idx, end_idx);
 613     }
 614     for (size_t idx = beg_idx; idx < end_idx; ++idx) {
 615       _region_data[idx].set_source_region(0);
 616     }
 617 
 618     // Set split_destination and partial_obj_size to reflect the split region.
 619     split_destination = sr->destination();
 620     partial_obj_size = sr->partial_obj_size();
 621   }
 622 
 623   // The split is recorded only if a partial object extends onto the region.
 624   if (partial_obj_size != 0) {
 625     _region_data[split_region].set_partial_obj_size(0);
 626     split_info.record(split_region, partial_obj_size, split_destination);
 627   }
 628 
 629   // Setup the continuation addresses.
 630   *target_next = split_destination + partial_obj_size;
 631   HeapWord* const source_next = region_to_addr(split_region) + partial_obj_size;
 632 
 633   if (TraceParallelOldGCSummaryPhase) {
 634     const char * split_type = partial_obj_size == 0 ? "easy" : "hard";
 635     gclog_or_tty->print_cr("%s split:  src=" PTR_FORMAT " src_c=" SIZE_FORMAT
 636                            " pos=" SIZE_FORMAT,
 637                            split_type, p2i(source_next), split_region,
 638                            partial_obj_size);
 639     gclog_or_tty->print_cr("%s split:  dst=" PTR_FORMAT " dst_c=" SIZE_FORMAT
 640                            " tn=" PTR_FORMAT,
 641                            split_type, p2i(split_destination),
 642                            addr_to_region_idx(split_destination),
 643                            p2i(*target_next));
 644 
 645     if (partial_obj_size != 0) {
 646       HeapWord* const po_beg = split_info.destination();
 647       HeapWord* const po_end = po_beg + split_info.partial_obj_size();
 648       gclog_or_tty->print_cr("%s split:  "
 649                              "po_beg=" PTR_FORMAT " " SIZE_FORMAT " "
 650                              "po_end=" PTR_FORMAT " " SIZE_FORMAT,
 651                              split_type,
 652                              p2i(po_beg), addr_to_region_idx(po_beg),
 653                              p2i(po_end), addr_to_region_idx(po_end));
 654     }
 655   }
 656 
 657   return source_next;
 658 }
 659 
 660 bool ParallelCompactData::summarize(SplitInfo& split_info,
 661                                     HeapWord* source_beg, HeapWord* source_end,
 662                                     HeapWord** source_next,
 663                                     HeapWord* target_beg, HeapWord* target_end,
 664                                     HeapWord** target_next)
 665 {
 666   if (TraceParallelOldGCSummaryPhase) {
 667     HeapWord* const source_next_val = source_next == NULL ? NULL : *source_next;
 668     tty->print_cr("sb=" PTR_FORMAT " se=" PTR_FORMAT " sn=" PTR_FORMAT " "
 669                   "tb=" PTR_FORMAT " te=" PTR_FORMAT " tn=" PTR_FORMAT,
 670                   p2i(source_beg), p2i(source_end), p2i(source_next_val),
 671                   p2i(target_beg), p2i(target_end), p2i(*target_next));
 672   }
 673 
 674   size_t cur_region = addr_to_region_idx(source_beg);
 675   const size_t end_region = addr_to_region_idx(region_align_up(source_end));
 676 
 677   HeapWord *dest_addr = target_beg;
 678   while (cur_region < end_region) {
 679     // The destination must be set even if the region has no data.
 680     _region_data[cur_region].set_destination(dest_addr);
 681 
 682     size_t words = _region_data[cur_region].data_size();
 683     if (words > 0) {
 684       // If cur_region does not fit entirely into the target space, find a point
 685       // at which the source space can be 'split' so that part is copied to the
 686       // target space and the rest is copied elsewhere.
 687       if (dest_addr + words > target_end) {
 688         assert(source_next != NULL, "source_next is NULL when splitting");
 689         *source_next = summarize_split_space(cur_region, split_info, dest_addr,
 690                                              target_end, target_next);
 691         return false;
 692       }


 883   PSYoungGen* young_gen = heap->young_gen();
 884 
 885   _space_info[old_space_id].set_space(heap->old_gen()->object_space());
 886   _space_info[eden_space_id].set_space(young_gen->eden_space());
 887   _space_info[from_space_id].set_space(young_gen->from_space());
 888   _space_info[to_space_id].set_space(young_gen->to_space());
 889 
 890   _space_info[old_space_id].set_start_array(heap->old_gen()->start_array());
 891 }
 892 
 893 void PSParallelCompact::initialize_dead_wood_limiter()
 894 {
 895   const size_t max = 100;
 896   _dwl_mean = double(MIN2(ParallelOldDeadWoodLimiterMean, max)) / 100.0;
 897   _dwl_std_dev = double(MIN2(ParallelOldDeadWoodLimiterStdDev, max)) / 100.0;
 898   _dwl_first_term = 1.0 / (sqrt(2.0 * M_PI) * _dwl_std_dev);
 899   DEBUG_ONLY(_dwl_initialized = true;)
 900   _dwl_adjustment = normal_distribution(1.0);
 901 }
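
The _dwl_first_term computed above is the normalizing constant 1/(sigma * sqrt(2*pi)) of a Gaussian density. A minimal sketch of that curve, assuming normal_distribution() (defined elsewhere in this file, not shown in this section) evaluates the density at a given heap density; the mean and std-dev values below are illustrative only, the real ones come from the ParallelOldDeadWoodLimiter* flags scaled by 1/100.

#include <math.h>
#include <stdio.h>

// Illustrative stand-in for the dead-wood limiter curve; the real object keeps
// the mean, std dev and first term in _dwl_mean, _dwl_std_dev and _dwl_first_term.
static double gaussian_density(double d, double mean, double std_dev) {
  const double first_term = 1.0 / (sqrt(2.0 * M_PI) * std_dev);
  const double z = (d - mean) / std_dev;
  return first_term * exp(-0.5 * z * z);
}

int main() {
  const double mean = 0.5, std_dev = 0.5;   // illustrative values only
  printf("density at d=1.0: %f\n", gaussian_density(1.0, mean, std_dev));
  return 0;
}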
 902 
 903 // Simple class for storing info about the heap at the start of GC, to be used
 904 // after GC for comparison/printing.
 905 class PreGCValues {
 906 public:
 907   PreGCValues() { }
 908   PreGCValues(ParallelScavengeHeap* heap) { fill(heap); }
 909 
 910   void fill(ParallelScavengeHeap* heap) {
 911     _heap_used      = heap->used();
 912     _young_gen_used = heap->young_gen()->used_in_bytes();
 913     _old_gen_used   = heap->old_gen()->used_in_bytes();
 914     _metadata_used  = MetaspaceAux::used_bytes();
 915   }
 916 
 917   size_t heap_used() const      { return _heap_used; }
 918   size_t young_gen_used() const { return _young_gen_used; }
 919   size_t old_gen_used() const   { return _old_gen_used; }
 920   size_t metadata_used() const  { return _metadata_used; }
 921 
 922 private:
 923   size_t _heap_used;
 924   size_t _young_gen_used;
 925   size_t _old_gen_used;
 926   size_t _metadata_used;
 927 };
 928 
 929 void
 930 PSParallelCompact::clear_data_covering_space(SpaceId id)
 931 {
 932   // At this point, top is the value before GC, new_top() is the value that will
 933   // be set at the end of GC.  The marking bitmap is cleared to top; nothing
 934   // should be marked above top.  The summary data is cleared to the larger of
 935   // top & new_top.
 936   MutableSpace* const space = _space_info[id].space();
 937   HeapWord* const bot = space->bottom();
 938   HeapWord* const top = space->top();
 939   HeapWord* const max_top = MAX2(top, _space_info[id].new_top());
 940 
 941   const idx_t beg_bit = _mark_bitmap.addr_to_bit(bot);
 942   const idx_t end_bit = BitMap::word_align_up(_mark_bitmap.addr_to_bit(top));
 943   _mark_bitmap.clear_range(beg_bit, end_bit);
 944 
 945   const size_t beg_region = _summary_data.addr_to_region_idx(bot);
 946   const size_t end_region =
 947     _summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top));
 948   _summary_data.clear_range(beg_region, end_region);
 949 
 950   // Clear the data used to 'split' regions.
 951   SplitInfo& split_info = _space_info[id].split_info();
 952   if (split_info.is_valid()) {
 953     split_info.clear();
 954   }
 955   DEBUG_ONLY(split_info.verify_clear();)
 956 }
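
A numeric illustration (word offsets invented) of the two clear ranges chosen above: the mark bitmap is cleared only up to top, while the summary data is cleared out to the region-aligned maximum of top and new_top, since for the old space new_top can lie above top once younger spaces are compacted into it.

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t RegionSize = (size_t)1 << 16;   // words per region, from the top of the file

  // Invented word offsets, measured from the bottom of the space.
  const size_t top     = 150000;               // space->top() before the collection
  const size_t new_top = 180000;               // where compaction will leave the space
  const size_t max_top = std::max(top, new_top);

  // Bitmap range: [bottom, top).  Summary range: [bottom, region_align_up(max_top)).
  const size_t end_region = (max_top + RegionSize - 1) / RegionSize;   // 3
  std::printf("clear bitmap through word %zu, clear summary regions [0, %zu)\n",
              top, end_region);
  return 0;
}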
 957 
 958 void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
 959 {
 960   // Update the from & to space pointers in space_info, since they are swapped
 961   // at each young gen gc.  Do the update unconditionally (even though a
 962   // promotion failure does not swap spaces) because an unknown number of young
 963   // collections will have swapped the spaces an unknown number of times.
 964   GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer);
 965   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 966   _space_info[from_space_id].set_space(heap->young_gen()->from_space());
 967   _space_info[to_space_id].set_space(heap->young_gen()->to_space());
 968 
 969   pre_gc_values->fill(heap);
 970 
 971   DEBUG_ONLY(add_obj_count = add_obj_size = 0;)
 972   DEBUG_ONLY(mark_bitmap_count = mark_bitmap_size = 0;)
 973 
 974   // Increment the invocation count
 975   heap->increment_total_collections(true);
 976 
 977   // We need to track unique mark sweep invocations as well.
 978   _total_invocations++;
 979 
 980   heap->print_heap_before_gc();
 981   heap->trace_heap_before_gc(&_gc_tracer);
 982 
 983   // Fill in TLABs
 984   heap->accumulate_statistics_all_tlabs();
 985   heap->ensure_parsability(true);  // retire TLABs
 986 
 987   if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
 988     HandleMark hm;  // Discard invalid handles created during verification
 989     Universe::verify(" VerifyBeforeGC:");
 990   }
 991 
 992   // Verify object start arrays
 993   if (VerifyObjectStartArray &&
 994       VerifyBeforeGC) {
 995     heap->old_gen()->verify_object_start_array();
 996   }
 997 
 998   DEBUG_ONLY(mark_bitmap()->verify_clear();)
 999   DEBUG_ONLY(summary_data().verify_clear();)
1000 
1001   // Have worker threads release resources the next time they run a task.
1002   gc_task_manager()->release_all_resources();
1003 }
1004 
1005 void PSParallelCompact::post_compact()
1006 {
1007   GCTraceTime tm("post compact", print_phases(), true, &_gc_timer);
1008 
1009   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
1010     // Clear the marking bitmap, summary data and split info.
1011     clear_data_covering_space(SpaceId(id));
1012     // Update top().  Must be done after clearing the bitmap and summary data.
1013     _space_info[id].publish_new_top();
1014   }
1015 
1016   MutableSpace* const eden_space = _space_info[eden_space_id].space();
1017   MutableSpace* const from_space = _space_info[from_space_id].space();
1018   MutableSpace* const to_space   = _space_info[to_space_id].space();
1019 
1020   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
1021   bool eden_empty = eden_space->is_empty();
1022   if (!eden_empty) {
1023     eden_empty = absorb_live_data_from_eden(heap->size_policy(),
1024                                             heap->young_gen(), heap->old_gen());
1025   }
1026 
1027   // Update heap occupancy information which is used as input to the soft ref


1541     // Recompute the summary data, taking into account the dense prefix.  If
1542     // every last byte will be reclaimed, then the existing summary data which
1543     // compacts everything can be left in place.
1544     if (!maximum_compaction && dense_prefix_end != space->bottom()) {
1545       // If dead space crosses the dense prefix boundary, it is (at least
1546       // partially) filled with a dummy object, marked live and added to the
1547       // summary data.  This simplifies the copy/update phase and must be done
1548       // before the final locations of objects are determined, to prevent
1549       // leaving a fragment of dead space that is too small to fill.
1550       fill_dense_prefix_end(id);
1551 
1552       // Compute the destination of each Region, and thus each object.
1553       _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end);
1554       _summary_data.summarize(_space_info[id].split_info(),
1555                               dense_prefix_end, space->top(), NULL,
1556                               dense_prefix_end, space->end(),
1557                               _space_info[id].new_top_addr());
1558     }
1559   }
1560 
1561   if (TraceParallelOldGCSummaryPhase) {
1562     const size_t region_size = ParallelCompactData::RegionSize;
1563     HeapWord* const dense_prefix_end = _space_info[id].dense_prefix();
1564     const size_t dp_region = _summary_data.addr_to_region_idx(dense_prefix_end);
1565     const size_t dp_words = pointer_delta(dense_prefix_end, space->bottom());
1566     HeapWord* const new_top = _space_info[id].new_top();
1567     const HeapWord* nt_aligned_up = _summary_data.region_align_up(new_top);
1568     const size_t cr_words = pointer_delta(nt_aligned_up, dense_prefix_end);
1569     tty->print_cr("id=%d cap=" SIZE_FORMAT " dp=" PTR_FORMAT " "
1570                   "dp_region=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " "
1571                   "cr_count=" SIZE_FORMAT " " "nt=" PTR_FORMAT,
1572                   id, space->capacity_in_words(), p2i(dense_prefix_end),
1573                   dp_region, dp_words / region_size,
1574                   cr_words / region_size, p2i(new_top));
1575   }
1576 }
1577 
1578 #ifndef PRODUCT
1579 void PSParallelCompact::summary_phase_msg(SpaceId dst_space_id,
1580                                           HeapWord* dst_beg, HeapWord* dst_end,
1581                                           SpaceId src_space_id,
1582                                           HeapWord* src_beg, HeapWord* src_end)
1583 {
1584   if (TraceParallelOldGCSummaryPhase) {
1585     tty->print_cr("summarizing %d [%s] into %d [%s]:  "
1586                   "src=" PTR_FORMAT "-" PTR_FORMAT " "
1587                   SIZE_FORMAT "-" SIZE_FORMAT " "
1588                   "dst=" PTR_FORMAT "-" PTR_FORMAT " "
1589                   SIZE_FORMAT "-" SIZE_FORMAT,
1590                   src_space_id, space_names[src_space_id],
1591                   dst_space_id, space_names[dst_space_id],
1592                   p2i(src_beg), p2i(src_end),
1593                   _summary_data.addr_to_region_idx(src_beg),
1594                   _summary_data.addr_to_region_idx(src_end),
1595                   p2i(dst_beg), p2i(dst_end),
1596                   _summary_data.addr_to_region_idx(dst_beg),
1597                   _summary_data.addr_to_region_idx(dst_end));
1598   }
1599 }
1600 #endif  // #ifndef PRODUCT
1601 
1602 void PSParallelCompact::summary_phase(ParCompactionManager* cm,
1603                                       bool maximum_compaction)
1604 {
1605   GCTraceTime tm("summary phase", print_phases(), true, &_gc_timer);
1606   // trace("2");
1607 
1608 #ifdef  ASSERT
1609   if (TraceParallelOldGCMarkingPhase) {
1610     tty->print_cr("add_obj_count=" SIZE_FORMAT " "
1611                   "add_obj_bytes=" SIZE_FORMAT,
1612                   add_obj_count, add_obj_size * HeapWordSize);
1613     tty->print_cr("mark_bitmap_count=" SIZE_FORMAT " "
1614                   "mark_bitmap_bytes=" SIZE_FORMAT,
1615                   mark_bitmap_count, mark_bitmap_size * HeapWordSize);
1616   }
1617 #endif  // #ifdef ASSERT
1618 
1619   // Quick summarization of each space into itself, to see how much is live.
1620   summarize_spaces_quick();
1621 
1622   if (TraceParallelOldGCSummaryPhase) {
1623     tty->print_cr("summary_phase:  after summarizing each space to self");
1624     Universe::print();
1625     NOT_PRODUCT(print_region_ranges());
1626     if (Verbose) {
1627       NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info));
1628     }
1629   }
1630 
1631   // The amount of live data that will end up in old space (assuming it fits).
1632   size_t old_space_total_live = 0;
1633   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
1634     old_space_total_live += pointer_delta(_space_info[id].new_top(),
1635                                           _space_info[id].space()->bottom());
1636   }
1637 
1638   MutableSpace* const old_space = _space_info[old_space_id].space();
1639   const size_t old_capacity = old_space->capacity_in_words();
1640   if (old_space_total_live > old_capacity) {
1641     // XXX - should also try to expand
1642     maximum_compaction = true;
1643   }
1644 
1645   // Old generations.
1646   summarize_space(old_space_id, maximum_compaction);
1647 
1648   // Summarize the remaining spaces in the young gen.  The initial target space
1649   // is the old gen.  If a space does not fit entirely into the target, then the


1683       assert(next_src_addr != NULL, "sanity");
1684 
1685       // The source space becomes the new target, so the remainder is compacted
1686       // within the space itself.
1687       dst_space_id = SpaceId(id);
1688       dst_space_end = space->end();
1689       new_top_addr = _space_info[id].new_top_addr();
1690       NOT_PRODUCT(summary_phase_msg(dst_space_id,
1691                                     space->bottom(), dst_space_end,
1692                                     SpaceId(id), next_src_addr, space->top());)
1693       done = _summary_data.summarize(_space_info[id].split_info(),
1694                                      next_src_addr, space->top(),
1695                                      NULL,
1696                                      space->bottom(), dst_space_end,
1697                                      new_top_addr);
1698       assert(done, "space must fit when compacted into itself");
1699       assert(*new_top_addr <= space->top(), "usage should not grow");
1700     }
1701   }
1702 
1703   if (TraceParallelOldGCSummaryPhase) {
1704     tty->print_cr("summary_phase:  after final summarization");
1705     Universe::print();
1706     NOT_PRODUCT(print_region_ranges());
1707     if (Verbose) {
1708       NOT_PRODUCT(print_generic_summary_data(_summary_data, _space_info));
1709     }
1710   }
1711 }
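
A deliberately simplified sketch (invented sizes, not the real API) of the placement policy this function implements: each young space's live data is summarized into whatever room remains in the old generation, and anything that does not fit is compacted within the source space itself. It shows only that first-level decision and ignores that an overflowing space then becomes the target for the spaces after it, as the loop above arranges.

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  // Invented live sizes in words; the real values are new_top() - bottom() per space.
  size_t old_free = 80000;                        // room left in the old generation
  const size_t live[] = { 50000, 60000, 10000 };  // eden, from, to
  const char*  name[] = { "eden", "from", "to" };

  for (int i = 0; i < 3; ++i) {
    const size_t into_old = std::min(live[i], old_free);  // summarized into the old gen
    const size_t in_place = live[i] - into_old;           // compacted within the space
    old_free -= into_old;
    std::printf("%-4s: %zu words into old gen, %zu words compacted in place\n",
                name[i], into_old, in_place);
  }
  return 0;
}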
1712 
1713 // This method should contain all heap-specific policy for invoking a full
1714 // collection.  invoke_no_policy() will only attempt to compact the heap; it
1715 // will do nothing further.  If we need to bail out for policy reasons, scavenge
1716 // before full gc, or any other specialized behavior, it needs to be added here.
1717 //
1718 // Note that this method should only be called from the vm_thread while at a
1719 // safepoint.
1720 //
1721 // Note that the all_soft_refs_clear flag in the collector policy
1722 // may be true because this method can be called without intervening
 1723 // activity.  For example, when the heap space is tight and full measures
 1724 // are being taken to free space.
1725 void PSParallelCompact::invoke(bool maximum_heap_compaction) {
1726   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1727   assert(Thread::current() == (Thread*)VMThread::vm_thread(),
1728          "should be in vm thread");
1729 
1730   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();


1765   TimeStamp compaction_start;
1766   TimeStamp collection_exit;
1767 
1768   GCCause::Cause gc_cause = heap->gc_cause();
1769   PSYoungGen* young_gen = heap->young_gen();
1770   PSOldGen* old_gen = heap->old_gen();
1771   PSAdaptiveSizePolicy* size_policy = heap->size_policy();
1772 
1773   // The scope of casr should end after code that can change
1774   // CollectorPolicy::_should_clear_all_soft_refs.
1775   ClearedAllSoftRefs casr(maximum_heap_compaction,
1776                           heap->collector_policy());
1777 
1778   if (ZapUnusedHeapArea) {
1779     // Save information needed to minimize mangling
1780     heap->record_gen_tops_before_GC();
1781   }
1782 
1783   heap->pre_full_gc_dump(&_gc_timer);
1784 
1785   _print_phases = PrintGCDetails && PrintParallelOldGCPhaseTimes;
1786 
1787   // Make sure data structures are sane, make the heap parsable, and do other
1788   // miscellaneous bookkeeping.
1789   PreGCValues pre_gc_values;
1790   pre_compact(&pre_gc_values);

1791 
1792   // Get the compaction manager reserved for the VM thread.
1793   ParCompactionManager* const vmthread_cm =
1794     ParCompactionManager::manager_array(gc_task_manager()->workers());
1795 
1796   // Place after pre_compact() where the number of invocations is incremented.
1797   AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
1798 
1799   {
1800     ResourceMark rm;
1801     HandleMark hm;
1802 
1803     // Set the number of GC threads to be used in this collection
1804     gc_task_manager()->set_active_gang();
1805     gc_task_manager()->task_idle_workers();
1806 
1807     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
1808     GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
1809     TraceCollectorStats tcs(counters());
 1810     TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);
1811 
1812     if (TraceOldGenTime) accumulated_time()->start();
1813 
1814     // Let the size policy know we're starting
1815     size_policy->major_collection_begin();
1816 
1817     CodeCache::gc_prologue();
1818 
1819 #if defined(COMPILER2) || INCLUDE_JVMCI
1820     DerivedPointerTable::clear();
1821 #endif
1822 
1823     ref_processor()->enable_discovery();
1824     ref_processor()->setup_policy(maximum_heap_compaction);
1825 
1826     bool marked_for_unloading = false;
1827 
1828     marking_start.update();


1835 #if defined(COMPILER2) || INCLUDE_JVMCI
1836     assert(DerivedPointerTable::is_active(), "Sanity");
1837     DerivedPointerTable::set_active(false);
1838 #endif
1839 
1840     // adjust_roots() updates Universe::_intArrayKlassObj which is
1841     // needed by the compaction for filling holes in the dense prefix.
1842     adjust_roots();
1843 
1844     compaction_start.update();
1845     compact();
1846 
1847     // Reset the mark bitmap, summary data, and do other bookkeeping.  Must be
1848     // done before resizing.
1849     post_compact();
1850 
1851     // Let the size policy know we're done
1852     size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);
1853 
1854     if (UseAdaptiveSizePolicy) {
1855       if (PrintAdaptiveSizePolicy) {
1856         gclog_or_tty->print("AdaptiveSizeStart: ");
1857         gclog_or_tty->stamp();
1858         gclog_or_tty->print_cr(" collection: %d ",
1859                        heap->total_collections());
1860         if (Verbose) {
1861           gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
1862             " young_gen_capacity: " SIZE_FORMAT,
1863             old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
1864         }
1865       }
1866 
1867       // Don't check if the size_policy is ready here.  Let
1868       // the size_policy check that internally.
1869       if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
1870           AdaptiveSizePolicy::should_update_promo_stats(gc_cause)) {
1871         // Swap the survivor spaces if from_space is empty. The
1872         // resize_young_gen() called below is normally used after
1873         // a successful young GC and swapping of survivor spaces;
1874         // otherwise, it will fail to resize the young gen with
1875         // the current implementation.
1876         if (young_gen->from_space()->is_empty()) {
1877           young_gen->from_space()->clear(SpaceDecorator::Mangle);
1878           young_gen->swap_spaces();
1879         }
1880 
1881         // Calculate optimal free space amounts
1882         assert(young_gen->max_size() >
1883           young_gen->from_space()->capacity_in_bytes() +
1884           young_gen->to_space()->capacity_in_bytes(),
1885           "Sizes of space in young gen are out-of-bounds");


1903                                                     max_old_gen_size,
1904                                                     max_eden_size,
1905                                                     true /* full gc*/);
1906 
1907         size_policy->check_gc_overhead_limit(young_live,
1908                                              eden_live,
1909                                              max_old_gen_size,
1910                                              max_eden_size,
1911                                              true /* full gc*/,
1912                                              gc_cause,
1913                                              heap->collector_policy());
1914 
1915         size_policy->decay_supplemental_growth(true /* full gc*/);
1916 
1917         heap->resize_old_gen(
1918           size_policy->calculated_old_free_size_in_bytes());
1919 
1920         heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
1921                                size_policy->calculated_survivor_size_in_bytes());
1922       }
1923       if (PrintAdaptiveSizePolicy) {
1924         gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
1925                        heap->total_collections());
1926       }
1927     }
1928 
1929     if (UsePerfData) {
1930       PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
1931       counters->update_counters();
1932       counters->update_old_capacity(old_gen->capacity_in_bytes());
1933       counters->update_young_capacity(young_gen->capacity_in_bytes());
1934     }
1935 
1936     heap->resize_all_tlabs();
1937 
1938     // Resize the metaspace capacity after a collection
1939     MetaspaceGC::compute_new_size();
1940 
1941     if (TraceOldGenTime) accumulated_time()->stop();
1942 
1943     if (PrintGC) {
1944       if (PrintGCDetails) {
1945         // No GC timestamp here.  This is after GC so it would be confusing.
1946         young_gen->print_used_change(pre_gc_values.young_gen_used());
1947         old_gen->print_used_change(pre_gc_values.old_gen_used());
1948         heap->print_heap_change(pre_gc_values.heap_used());
1949         MetaspaceAux::print_metaspace_change(pre_gc_values.metadata_used());
1950       } else {
1951         heap->print_heap_change(pre_gc_values.heap_used());
1952       }
1953     }
1954 
1955     // Track memory usage and detect low memory
1956     MemoryService::track_memory_usage();
1957     heap->update_counters();
1958     gc_task_manager()->release_idle_workers();
1959   }
1960 
1961 #ifdef ASSERT
1962   for (size_t i = 0; i < ParallelGCThreads + 1; ++i) {
1963     ParCompactionManager* const cm =
1964       ParCompactionManager::manager_array(int(i));
1965     assert(cm->marking_stack()->is_empty(),       "should be empty");
1966     assert(ParCompactionManager::region_list(int(i))->is_empty(), "should be empty");
1967   }
1968 #endif // ASSERT
1969 
1970   if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
1971     HandleMark hm;  // Discard invalid handles created during verification
1972     Universe::verify(" VerifyAfterGC:");
1973   }
1974 
1975   // Re-verify object start arrays
1976   if (VerifyObjectStartArray &&
1977       VerifyAfterGC) {
1978     old_gen->verify_object_start_array();
1979   }
1980 
1981   if (ZapUnusedHeapArea) {
1982     old_gen->object_space()->check_mangled_unused_area_complete();
1983   }
1984 
1985   NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
1986 
1987   collection_exit.update();
1988 
1989   heap->print_heap_after_gc();
1990   heap->trace_heap_after_gc(&_gc_tracer);
1991 
1992   if (PrintGCTaskTimeStamps) {
1993     gclog_or_tty->print_cr("VM-Thread " JLONG_FORMAT " " JLONG_FORMAT " "
1994                            JLONG_FORMAT,
1995                            marking_start.ticks(), compaction_start.ticks(),
1996                            collection_exit.ticks());
1997     gc_task_manager()->print_task_time_stamps();
1998   }
1999 
2000   heap->post_full_gc_dump(&_gc_timer);
2001 
2002 #ifdef TRACESPINNING
2003   ParallelTaskTerminator::print_termination_counts();
2004 #endif
2005 
2006   _gc_timer.register_gc_end();
2007 
2008   _gc_tracer.report_dense_prefix(dense_prefix(old_space_id));
2009   _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());
2010 
2011   return true;
2012 }
2013 
2014 bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
2015                                              PSYoungGen* young_gen,
2016                                              PSOldGen* old_gen) {
2017   MutableSpace* const eden_space = young_gen->eden_space();
2018   assert(!eden_space->is_empty(), "eden must be non-empty");
2019   assert(young_gen->virtual_space()->alignment() ==
2020          old_gen->virtual_space()->alignment(), "alignments do not match");
2021 
2022   if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
2023     return false;
2024   }
2025 


2032   }
2033 
2034   // Figure out how much to take from eden.  Include the average amount promoted
2035   // in the total; otherwise the next young gen GC will simply bail out to a
2036   // full GC.
2037   const size_t alignment = old_gen->virtual_space()->alignment();
2038   const size_t eden_used = eden_space->used_in_bytes();
2039   const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
2040   const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
2041   const size_t eden_capacity = eden_space->capacity_in_bytes();
2042 
2043   if (absorb_size >= eden_capacity) {
2044     return false; // Must leave some space in eden.
2045   }
2046 
2047   const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
2048   if (new_young_size < young_gen->min_gen_size()) {
2049     return false; // Respect young gen minimum size.
2050   }
2051 
2052   if (TraceAdaptiveGCBoundary && Verbose) {
2053     gclog_or_tty->print(" absorbing " SIZE_FORMAT "K:  "
2054                         "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
2055                         "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
2056                         "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
2057                         absorb_size / K,
2058                         eden_capacity / K, (eden_capacity - absorb_size) / K,
2059                         young_gen->from_space()->used_in_bytes() / K,
2060                         young_gen->to_space()->used_in_bytes() / K,
2061                         young_gen->capacity_in_bytes() / K, new_young_size / K);
2062   }
2063 
2064   // Fill the unused part of the old gen.
2065   MutableSpace* const old_space = old_gen->object_space();
2066   HeapWord* const unused_start = old_space->top();
2067   size_t const unused_words = pointer_delta(old_space->end(), unused_start);
2068 
2069   if (unused_words > 0) {
2070     if (unused_words < CollectedHeap::min_fill_size()) {
2071       return false;  // If the old gen cannot be filled, must give up.
2072     }
2073     CollectedHeap::fill_with_objects(unused_start, unused_words);
2074   }
2075 
2076   // Take the live data from eden and set both top and end in the old gen to
2077   // eden top.  (Need to set end because reset_after_change() mangles the region
2078   // from end to virtual_space->high() in debug builds).
2079   HeapWord* const new_top = eden_space->top();
2080   old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
2081                                         absorb_size);
2082   young_gen->reset_after_change();


2092 
2093   // Could update the promoted average here, but it is not typically updated at
2094   // full GCs and the value to use is unclear.  Something like
2095   //
2096   // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.
2097 
2098   size_policy->set_bytes_absorbed_from_eden(absorb_size);
2099   return true;
2100 }
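
A worked example (all sizes invented) of the checks performed above, assuming align_size_up simply rounds its first argument up to the given alignment: eden usage plus the padded promotion average is rounded up to the generation-boundary alignment, and the move is abandoned if that would empty eden entirely or shrink the young gen below its minimum.

#include <cstddef>
#include <cstdio>

// Stand-in for align_size_up: round v up to a multiple of alignment.
static size_t align_up(size_t v, size_t alignment) {
  return (v + alignment - 1) / alignment * alignment;
}

int main() {
  const size_t K = 1024, M = 1024 * K;
  const size_t alignment     = 64 * K;   // assumed generation-boundary alignment
  const size_t eden_used     = 30 * M;
  const size_t promoted_avg  = 6 * M;    // padded average promoted per young GC
  const size_t eden_capacity = 48 * M;
  const size_t young_cap     = 64 * M;
  const size_t young_min     = 16 * M;

  const size_t absorb_size    = align_up(eden_used + promoted_avg, alignment);  // 36 MiB
  const size_t new_young_size = young_cap - absorb_size;                        // 28 MiB
  const bool leaves_eden_room = absorb_size < eden_capacity;                    // true
  const bool respects_minimum = new_young_size >= young_min;                    // true

  std::printf("absorb=%zuM eden_room=%d new_young=%zuM min_ok=%d\n",
              absorb_size / M, leaves_eden_room, new_young_size / M, respects_minimum);
  return 0;
}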
2101 
2102 GCTaskManager* const PSParallelCompact::gc_task_manager() {
2103   assert(ParallelScavengeHeap::gc_task_manager() != NULL,
2104     "shouldn't return NULL");
2105   return ParallelScavengeHeap::gc_task_manager();
2106 }
2107 
2108 void PSParallelCompact::marking_phase(ParCompactionManager* cm,
2109                                       bool maximum_heap_compaction,
2110                                       ParallelOldTracer *gc_tracer) {
2111   // Recursively traverse all live objects and mark them
2112   GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer);
2113 
2114   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
2115   uint parallel_gc_threads = heap->gc_task_manager()->workers();
2116   uint active_gc_threads = heap->gc_task_manager()->active_workers();
2117   TaskQueueSetSuper* qset = ParCompactionManager::region_array();
2118   ParallelTaskTerminator terminator(active_gc_threads, qset);
2119 
2120   ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
2121   ParCompactionManager::FollowStackClosure follow_stack_closure(cm);
2122 
2123   // Need new claim bits before marking starts.
2124   ClassLoaderDataGraph::clear_claimed_marks();
2125 
2126   {
2127     GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer);
2128 
2129     ParallelScavengeHeap::ParStrongRootsScope psrs;
2130 
2131     GCTaskQueue* q = GCTaskQueue::create();
2132 
2133     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::universe));
2134     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jni_handles));
2135     // We scan the thread roots in parallel
2136     Threads::create_thread_roots_marking_tasks(q);
2137     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::object_synchronizer));
2138     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::flat_profiler));
2139     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::management));
2140     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary));
2141     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::class_loader_data));
2142     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti));
2143     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::code_cache));
2144 
2145     if (active_gc_threads > 1) {
2146       for (uint j = 0; j < active_gc_threads; j++) {
2147         q->enqueue(new StealMarkingTask(&terminator));
2148       }
2149     }
2150 
2151     gc_task_manager()->execute_and_wait(q);
2152   }
2153 
2154   // Process reference objects found during marking
2155   {
2156     GCTraceTime tm_r("reference processing", print_phases(), true, &_gc_timer);
2157 
2158     ReferenceProcessorStats stats;
2159     if (ref_processor()->processing_is_mt()) {
2160       RefProcTaskExecutor task_executor;
2161       stats = ref_processor()->process_discovered_references(
2162         is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
2163         &task_executor, &_gc_timer);
2164     } else {
2165       stats = ref_processor()->process_discovered_references(
2166         is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL,
2167         &_gc_timer);
2168     }
2169 
2170     gc_tracer->report_gc_reference_stats(stats);
2171   }
2172 
2173   GCTraceTime tm_c("class unloading", print_phases(), true, &_gc_timer);
2174 
2175   // This is the point where the entire marking should have completed.
2176   assert(cm->marking_stacks_empty(), "Marking should have completed");
2177 
2178   // Follow system dictionary roots and unload classes.
2179   bool purged_class = SystemDictionary::do_unloading(is_alive_closure());
2180 
2181   // Unload nmethods.
2182   CodeCache::do_unloading(is_alive_closure(), purged_class);
2183 
2184   // Prune dead klasses from subklass/sibling/implementor lists.
2185   Klass::clean_weak_klass_links(is_alive_closure());
2186 
2187   // Delete entries for dead interned strings.
2188   StringTable::unlink(is_alive_closure());
2189 
2190   // Clean up unreferenced symbols in symbol table.
2191   SymbolTable::unlink();
2192   _gc_tracer.report_object_count_after_gc(is_alive_closure());
2193 }
2194 
2195 // This should be moved to the shared markSweep code!
2196 class PSAlwaysTrueClosure: public BoolObjectClosure {
2197 public:
2198   bool do_object_b(oop p) { return true; }
2199 };
2200 static PSAlwaysTrueClosure always_true;
2201 
2202 void PSParallelCompact::adjust_roots() {
2203   // Adjust the pointers to reflect the new locations
2204   GCTraceTime tm("adjust roots", print_phases(), true, &_gc_timer);
2205 
2206   // Need new claim bits when tracing through and adjusting pointers.
2207   ClassLoaderDataGraph::clear_claimed_marks();
2208 
2209   // General strong roots.
2210   Universe::oops_do(adjust_pointer_closure());
2211   JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
2212   CLDToOopClosure adjust_from_cld(adjust_pointer_closure());
2213   Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL);
2214   ObjectSynchronizer::oops_do(adjust_pointer_closure());
2215   FlatProfiler::oops_do(adjust_pointer_closure());
2216   Management::oops_do(adjust_pointer_closure());
2217   JvmtiExport::oops_do(adjust_pointer_closure());
2218   SystemDictionary::oops_do(adjust_pointer_closure());
2219   ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true);
2220 
2221   // Now adjust pointers in remaining weak roots.  (All of which should
2222   // have been cleared if they pointed to non-surviving objects.)
2223   // Global (weak) JNI handles
2224   JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());
2225 
2226   CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
2227   CodeCache::blobs_do(&adjust_from_blobs);
2228   StringTable::oops_do(adjust_pointer_closure());
2229   ref_processor()->weak_oops_do(adjust_pointer_closure());
2230   // Roots were visited so references into the young gen in roots
2231   // may have been scanned.  Process them also.
2232   // Should the reference processor have a span that excludes
2233   // young gen objects?
2234   PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());
2235 }
2236 
2237 void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
2238                                                       uint parallel_gc_threads)
2239 {
2240   GCTraceTime tm("drain task setup", print_phases(), true, &_gc_timer);
2241 
2242   // Find the threads that are active
2243   unsigned int which = 0;
2244 
2245   const uint task_count = MAX2(parallel_gc_threads, 1U);
2246   for (uint j = 0; j < task_count; j++) {
2247     q->enqueue(new DrainStacksCompactionTask(j));
2248     ParCompactionManager::verify_region_list_empty(j);
2249     // Set the region stacks variables to "no" region stack values
2250     // so that they will be recognized and needing a region stack
2251     // in the stealing tasks if they do not get one by executing
2252     // a draining stack.
2253     ParCompactionManager* cm = ParCompactionManager::manager_array(j);
2254     cm->set_region_stack(NULL);
2255     cm->set_region_stack_index((uint)max_uintx);
2256   }
2257   ParCompactionManager::reset_recycled_stack_index();
2258 
2259   // Find all regions that are available (can be filled immediately) and
2260   // distribute them to the thread stacks.  The iteration is done in reverse
2261   // order (high to low) so the regions will be removed in ascending order.
2262 
2263   const ParallelCompactData& sd = PSParallelCompact::summary_data();
2264 
2265   size_t fillable_regions = 0;   // A count for diagnostic purposes.
2266   // A region index which corresponds to the tasks created above.
2267   // "which" must be 0 <= which < task_count
2268 
2269   which = 0;
 2270   // id + 1 is used to test termination so that an unsigned type can
 2271   // be used with an old_space_id == 0.
2272   for (unsigned int id = to_space_id; id + 1 > old_space_id; --id) {
2273     SpaceInfo* const space_info = _space_info + id;
2274     MutableSpace* const space = space_info->space();
2275     HeapWord* const new_top = space_info->new_top();
2276 
2277     const size_t beg_region = sd.addr_to_region_idx(space_info->dense_prefix());
2278     const size_t end_region =
2279       sd.addr_to_region_idx(sd.region_align_up(new_top));
2280 
2281     for (size_t cur = end_region - 1; cur + 1 > beg_region; --cur) {
2282       if (sd.region(cur)->claim_unsafe()) {
2283         ParCompactionManager::region_list_push(which, cur);
2284 
2285         if (TraceParallelOldGCCompactionPhase && Verbose) {
2286           const size_t count_mod_8 = fillable_regions & 7;
2287           if (count_mod_8 == 0) gclog_or_tty->print("fillable: ");
2288           gclog_or_tty->print(" " SIZE_FORMAT_W(7), cur);
2289           if (count_mod_8 == 7) gclog_or_tty->cr();
2290         }
2291 
2292         NOT_PRODUCT(++fillable_regions;)
2293 
2294         // Assign regions to tasks in round-robin fashion.
2295         if (++which == task_count) {
2296           assert(which <= parallel_gc_threads,
2297             "Inconsistent number of workers");
2298           which = 0;
2299         }
2300       }
2301     }
2302   }
2303 
2304   if (TraceParallelOldGCCompactionPhase) {
2305     if (Verbose && (fillable_regions & 7) != 0) gclog_or_tty->cr();
2306     gclog_or_tty->print_cr(SIZE_FORMAT " initially fillable regions", fillable_regions);
2307   }
2308 }
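
A small standalone sketch of the distribution pattern used above: claimable regions are walked from the highest index down and dealt out round-robin across the per-task lists, so each list later yields its regions in ascending order when popped from the back. The vectors below are illustrative stand-ins for ParCompactionManager::region_list.

#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  const unsigned task_count = 3;                        // assumed number of drain tasks
  std::vector<std::vector<size_t> > lists(task_count);  // stand-in per-task region lists

  // Deal a span of fillable regions out round-robin, walking from high index to low.
  const size_t beg_region = 10, end_region = 18;
  unsigned which = 0;
  for (size_t cur = end_region - 1; cur + 1 > beg_region; --cur) {
    lists[which].push_back(cur);
    if (++which == task_count) which = 0;
  }

  for (unsigned t = 0; t < task_count; ++t) {
    std::printf("task %u:", t);
    for (size_t i = lists[t].size(); i > 0; --i) {   // popping the back gives ascending order
      std::printf(" %zu", lists[t][i - 1]);
    }
    std::printf("\n");
  }
  return 0;
}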
2309 
2310 #define PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING 4
2311 
2312 void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
2313                                                     uint parallel_gc_threads) {
2314   GCTraceTime tm("dense prefix task setup", print_phases(), true, &_gc_timer);
2315 
2316   ParallelCompactData& sd = PSParallelCompact::summary_data();
2317 
2318   // Iterate over all the spaces adding tasks for updating
2319   // regions in the dense prefix.  Assume that 1 gc thread
2320   // will work on opening the gaps and the remaining gc threads
2321   // will work on the dense prefix.
2322   unsigned int space_id;
 2323   for (space_id = old_space_id; space_id < last_space_id; ++space_id) {
2324     HeapWord* const dense_prefix_end = _space_info[space_id].dense_prefix();
2325     const MutableSpace* const space = _space_info[space_id].space();
2326 
2327     if (dense_prefix_end == space->bottom()) {
2328       // There is no dense prefix for this space.
2329       continue;
2330     }
2331 
2332     // The dense prefix is before this region.
2333     size_t region_index_end_dense_prefix =
2334         sd.addr_to_region_idx(dense_prefix_end);


2376         q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id),
2377                                              region_index_start,
2378                                              region_index_end));
2379         region_index_start = region_index_end;
2380       }
2381     }
2382     // This gets any part of the dense prefix that did not
2383     // fit evenly.
2384     if (region_index_start < region_index_end_dense_prefix) {
2385       q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id),
2386                                            region_index_start,
2387                                            region_index_end_dense_prefix));
2388     }
2389   }
2390 }
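
The chunk-size calculation is elided from this section, but the shape of the loop is visible: the dense-prefix region range is cut into equal chunks, one UpdateDensePrefixTask per chunk, with a final task for any remainder that did not divide evenly. A minimal sketch of that pattern; the chunk size here is arbitrary, whereas the real code derives it from the active thread count and PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING.

#include <cstddef>
#include <cstdio>

int main() {
  // Invented dense-prefix region range and chunk size.
  const size_t region_index_end_dense_prefix = 137;
  const size_t regions_per_chunk = 10;
  size_t region_index_start = 100;

  while (region_index_start + regions_per_chunk <= region_index_end_dense_prefix) {
    const size_t region_index_end = region_index_start + regions_per_chunk;
    std::printf("UpdateDensePrefixTask [%zu, %zu)\n", region_index_start, region_index_end);
    region_index_start = region_index_end;
  }
  // Any part of the dense prefix that did not fit evenly gets one last task.
  if (region_index_start < region_index_end_dense_prefix) {
    std::printf("UpdateDensePrefixTask [%zu, %zu)\n",
                region_index_start, region_index_end_dense_prefix);
  }
  return 0;
}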
2391 
2392 void PSParallelCompact::enqueue_region_stealing_tasks(
2393                                      GCTaskQueue* q,
2394                                      ParallelTaskTerminator* terminator_ptr,
2395                                      uint parallel_gc_threads) {
2396   GCTraceTime tm("steal task setup", print_phases(), true, &_gc_timer);
2397 
 2398   // Once a thread has drained its stack, it should try to steal regions from
2399   // other threads.
2400   if (parallel_gc_threads > 1) {
2401     for (uint j = 0; j < parallel_gc_threads; j++) {
2402       q->enqueue(new StealRegionCompactionTask(terminator_ptr));
2403     }
2404   }
2405 }
2406 
2407 #ifdef ASSERT
2408 // Write a histogram of the number of times the block table was filled for a
2409 // region.
2410 void PSParallelCompact::write_block_fill_histogram(outputStream* const out)
2411 {
2412   if (!TraceParallelOldGCCompactionPhase) return;
2413 
2414   typedef ParallelCompactData::RegionData rd_t;
2415   ParallelCompactData& sd = summary_data();
2416 
2417   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2418     MutableSpace* const spc = _space_info[id].space();
2419     if (spc->bottom() != spc->top()) {
2420       const rd_t* const beg = sd.addr_to_region_ptr(spc->bottom());
2421       HeapWord* const top_aligned_up = sd.region_align_up(spc->top());
2422       const rd_t* const end = sd.addr_to_region_ptr(top_aligned_up);
2423 
2424       size_t histo[5] = { 0, 0, 0, 0, 0 };
2425       const size_t histo_len = sizeof(histo) / sizeof(size_t);
2426       const size_t region_cnt = pointer_delta(end, beg, sizeof(rd_t));
2427 
2428       for (const rd_t* cur = beg; cur < end; ++cur) {
2429         ++histo[MIN2(cur->blocks_filled_count(), histo_len - 1)];
2430       }
2431       out->print("%u %-4s" SIZE_FORMAT_W(5), id, space_names[id], region_cnt);
2432       for (size_t i = 0; i < histo_len; ++i) {
2433         out->print(" " SIZE_FORMAT_W(5) " %5.1f%%",
2434                    histo[i], 100.0 * histo[i] / region_cnt);
2435       }
2436       out->cr();
2437     }
2438   }
2439 }
2440 #endif // #ifdef ASSERT
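// Illustrative note (a reading of the histogram code above, not part of the
// original source): buckets 0 through 3 count regions whose block table was
// filled exactly that many times, and the final bucket aggregates counts of
// 4 or more.  A line such as
//
//   0 old   1024   900  87.9%   100   9.8%    20   2.0%     3   0.3%     1   0.1%
//
// (made-up numbers) would mean the old space spans 1024 regions, 900 of which
// never had their block table filled, 100 were filled once, and so on.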
2441 
2442 void PSParallelCompact::compact() {
2443   // trace("5");
2444   GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer);
2445 
2446   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
2447   PSOldGen* old_gen = heap->old_gen();
2448   old_gen->start_array()->reset();
2449   uint parallel_gc_threads = heap->gc_task_manager()->workers();
2450   uint active_gc_threads = heap->gc_task_manager()->active_workers();
2451   TaskQueueSetSuper* qset = ParCompactionManager::region_array();
2452   ParallelTaskTerminator terminator(active_gc_threads, qset);
2453 
2454   GCTaskQueue* q = GCTaskQueue::create();
2455   enqueue_region_draining_tasks(q, active_gc_threads);
2456   enqueue_dense_prefix_tasks(q, active_gc_threads);
2457   enqueue_region_stealing_tasks(q, &terminator, active_gc_threads);
2458 
2459   {
2460     GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer);
2461 
2462     gc_task_manager()->execute_and_wait(q);
2463 
2464 #ifdef  ASSERT
2465     // Verify that all regions have been processed before the deferred updates.
2466     for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2467       verify_complete(SpaceId(id));
2468     }
2469 #endif
2470   }
2471 
2472   {
2473     // Update the deferred objects, if any.  Any compaction manager can be used.
2474     GCTraceTime tm_du("deferred updates", print_phases(), true, &_gc_timer);
2475     ParCompactionManager* cm = ParCompactionManager::manager_array(0);
2476     for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2477       update_deferred_objects(cm, SpaceId(id));
2478     }
2479   }
2480 
2481   DEBUG_ONLY(write_block_fill_histogram(gclog_or_tty));
2482 }
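// Illustrative note (a summary of the phase above, not part of the original
// source): the compaction phase builds a single GCTaskQueue holding three
// kinds of tasks: per-thread region draining tasks, dense prefix update
// tasks, and region stealing tasks that keep threads busy once their own
// work runs out.  All of them run under one execute_and_wait() call, after
// which any deferred object updates are applied.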
2483 
2484 #ifdef  ASSERT
2485 void PSParallelCompact::verify_complete(SpaceId space_id) {
2486   // All Regions between space bottom() and new_top() should be marked as filled
2487   // and all Regions between new_top() and top() should be available (i.e.,
2488   // should have been emptied).
2489   ParallelCompactData& sd = summary_data();
2490   SpaceInfo si = _space_info[space_id];
2491   HeapWord* new_top_addr = sd.region_align_up(si.new_top());
2492   HeapWord* old_top_addr = sd.region_align_up(si.space()->top());
2493   const size_t beg_region = sd.addr_to_region_idx(si.space()->bottom());
2494   const size_t new_top_region = sd.addr_to_region_idx(new_top_addr);
2495   const size_t old_top_region = sd.addr_to_region_idx(old_top_addr);
2496 
2497   bool issued_a_warning = false;
2498 
2499   size_t cur_region;
2500   for (cur_region = beg_region; cur_region < new_top_region; ++cur_region) {
2501     const RegionData* const c = sd.region(cur_region);


3090 
3091 void InstanceKlass::oop_pc_update_pointers(oop obj) {
3092   oop_oop_iterate_oop_maps<true>(obj, PSParallelCompact::adjust_pointer_closure());
3093 }
3094 
3095 void InstanceMirrorKlass::oop_pc_update_pointers(oop obj) {
3096   InstanceKlass::oop_pc_update_pointers(obj);
3097 
3098   oop_oop_iterate_statics<true>(obj, PSParallelCompact::adjust_pointer_closure());
3099 }
3100 
3101 void InstanceClassLoaderKlass::oop_pc_update_pointers(oop obj) {
3102   InstanceKlass::oop_pc_update_pointers(obj);
3103 }
3104 
3105 #ifdef ASSERT
3106 template <class T> static void trace_reference_gc(const char *s, oop obj,
3107                                                   T* referent_addr,
3108                                                   T* next_addr,
3109                                                   T* discovered_addr) {
3110   if (TraceReferenceGC && PrintGCDetails) {
3111     gclog_or_tty->print_cr("%s obj " PTR_FORMAT, s, p2i(obj));
3112     gclog_or_tty->print_cr("     referent_addr/* " PTR_FORMAT " / "
3113                            PTR_FORMAT, p2i(referent_addr),
3114                            referent_addr ? p2i(oopDesc::load_decode_heap_oop(referent_addr)) : NULL);
3115     gclog_or_tty->print_cr("     next_addr/* " PTR_FORMAT " / "
3116                            PTR_FORMAT, p2i(next_addr),
3117                            next_addr ? p2i(oopDesc::load_decode_heap_oop(next_addr)) : NULL);
3118     gclog_or_tty->print_cr("     discovered_addr/* " PTR_FORMAT " / "
3119                            PTR_FORMAT, p2i(discovered_addr),
3120                            discovered_addr ? p2i(oopDesc::load_decode_heap_oop(discovered_addr)) : NULL);
3121   }
3122 }
3123 #endif
3124 
3125 template <class T>
3126 static void oop_pc_update_pointers_specialized(oop obj) {
3127   T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
3128   PSParallelCompact::adjust_pointer(referent_addr);
3129   T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
3130   PSParallelCompact::adjust_pointer(next_addr);
3131   T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
3132   PSParallelCompact::adjust_pointer(discovered_addr);
3133   debug_only(trace_reference_gc("InstanceRefKlass::oop_update_ptrs", obj,
3134                                 referent_addr, next_addr, discovered_addr);)
3135 }
3136 
3137 void InstanceRefKlass::oop_pc_update_pointers(oop obj) {
3138   InstanceKlass::oop_pc_update_pointers(obj);
3139 
3140   if (UseCompressedOops) {
3141     oop_pc_update_pointers_specialized<narrowOop>(obj);




  32 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
  33 #include "gc/parallel/psCompactionManager.inline.hpp"
  34 #include "gc/parallel/psMarkSweep.hpp"
  35 #include "gc/parallel/psMarkSweepDecorator.hpp"
  36 #include "gc/parallel/psOldGen.hpp"
  37 #include "gc/parallel/psParallelCompact.inline.hpp"
  38 #include "gc/parallel/psPromotionManager.inline.hpp"
  39 #include "gc/parallel/psScavenge.hpp"
  40 #include "gc/parallel/psYoungGen.hpp"
  41 #include "gc/shared/gcCause.hpp"
  42 #include "gc/shared/gcHeapSummary.hpp"
  43 #include "gc/shared/gcId.hpp"
  44 #include "gc/shared/gcLocker.inline.hpp"
  45 #include "gc/shared/gcTimer.hpp"
  46 #include "gc/shared/gcTrace.hpp"
  47 #include "gc/shared/gcTraceTime.hpp"
  48 #include "gc/shared/isGCActiveMark.hpp"
  49 #include "gc/shared/referencePolicy.hpp"
  50 #include "gc/shared/referenceProcessor.hpp"
  51 #include "gc/shared/spaceDecorator.hpp"
  52 #include "logging/log.hpp"
  53 #include "oops/instanceKlass.inline.hpp"
  54 #include "oops/instanceMirrorKlass.inline.hpp"
  55 #include "oops/methodData.hpp"
  56 #include "oops/objArrayKlass.inline.hpp"
  57 #include "oops/oop.inline.hpp"
  58 #include "runtime/atomic.inline.hpp"
  59 #include "runtime/fprofiler.hpp"
  60 #include "runtime/safepoint.hpp"
  61 #include "runtime/vmThread.hpp"
  62 #include "services/management.hpp"
  63 #include "services/memTracker.hpp"
  64 #include "services/memoryService.hpp"
  65 #include "utilities/events.hpp"
  66 #include "utilities/stack.inline.hpp"
  67 
  68 #include <math.h>
  69 
  70 // All sizes are in HeapWords.
  71 const size_t ParallelCompactData::Log2RegionSize  = 16; // 64K words
  72 const size_t ParallelCompactData::RegionSize      = (size_t)1 << Log2RegionSize;


  90 
  91 const ParallelCompactData::RegionData::region_sz_t
  92 ParallelCompactData::RegionData::dc_shift = 27;
  93 
  94 const ParallelCompactData::RegionData::region_sz_t
  95 ParallelCompactData::RegionData::dc_mask = ~0U << dc_shift;
  96 
  97 const ParallelCompactData::RegionData::region_sz_t
  98 ParallelCompactData::RegionData::dc_one = 0x1U << dc_shift;
  99 
 100 const ParallelCompactData::RegionData::region_sz_t
 101 ParallelCompactData::RegionData::los_mask = ~dc_mask;
 102 
 103 const ParallelCompactData::RegionData::region_sz_t
 104 ParallelCompactData::RegionData::dc_claimed = 0x8U << dc_shift;
 105 
 106 const ParallelCompactData::RegionData::region_sz_t
 107 ParallelCompactData::RegionData::dc_completed = 0xcU << dc_shift;
 108 
 109 SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];

 110 
 111 ReferenceProcessor* PSParallelCompact::_ref_processor = NULL;
 112 
 113 double PSParallelCompact::_dwl_mean;
 114 double PSParallelCompact::_dwl_std_dev;
 115 double PSParallelCompact::_dwl_first_term;
 116 double PSParallelCompact::_dwl_adjustment;
 117 #ifdef  ASSERT
 118 bool   PSParallelCompact::_dwl_initialized = false;
 119 #endif  // #ifdef ASSERT
 120 
 121 void SplitInfo::record(size_t src_region_idx, size_t partial_obj_size,
 122                        HeapWord* destination)
 123 {
 124   assert(src_region_idx != 0, "invalid src_region_idx");
 125   assert(partial_obj_size != 0, "invalid partial_obj_size argument");
 126   assert(destination != NULL, "invalid destination argument");
 127 
 128   _src_region_idx = src_region_idx;
 129   _partial_obj_size = partial_obj_size;


 176 {
 177   assert(_src_region_idx == 0, "not clear");
 178   assert(_partial_obj_size == 0, "not clear");
 179   assert(_destination == NULL, "not clear");
 180   assert(_destination_count == 0, "not clear");
 181   assert(_dest_region_addr == NULL, "not clear");
 182   assert(_first_src_addr == NULL, "not clear");
 183 }
 184 #endif  // #ifdef ASSERT
 185 
 186 
 187 void PSParallelCompact::print_on_error(outputStream* st) {
 188   _mark_bitmap.print_on_error(st);
 189 }
 190 
 191 #ifndef PRODUCT
 192 const char* PSParallelCompact::space_names[] = {
 193   "old ", "eden", "from", "to  "
 194 };
 195 
 196 void PSParallelCompact::print_region_ranges() {
 197   LogHandle(gc, compaction, phases) log;
 198   if (!log.is_develop()) {
 199     return;
 200   }
 201   ResourceMark rm;
 202   Universe::print_on(log.develop_stream());
 203   log.develop("space  bottom     top        end        new_top");
 204   log.develop("------ ---------- ---------- ---------- ----------");
 205 
 206   for (unsigned int id = 0; id < last_space_id; ++id) {
 207     const MutableSpace* space = _space_info[id].space();
 208     log.develop("%u %s "
 209                 SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " "
 210                 SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " ",
 211                 id, space_names[id],
 212                 summary_data().addr_to_region_idx(space->bottom()),
 213                 summary_data().addr_to_region_idx(space->top()),
 214                 summary_data().addr_to_region_idx(space->end()),
 215                 summary_data().addr_to_region_idx(_space_info[id].new_top()));
 216   }
 217 }
 218 
 219 void
 220 print_generic_summary_region(size_t i, const ParallelCompactData::RegionData* c)
 221 {
 222 #define REGION_IDX_FORMAT        SIZE_FORMAT_W(7)
 223 #define REGION_DATA_FORMAT       SIZE_FORMAT_W(5)
 224 
 225   ParallelCompactData& sd = PSParallelCompact::summary_data();
 226   size_t dci = c->destination() ? sd.addr_to_region_idx(c->destination()) : 0;
 227   log_develop(gc, compaction, phases)(
 228       REGION_IDX_FORMAT " " PTR_FORMAT " "
 229       REGION_IDX_FORMAT " " PTR_FORMAT " "
 230       REGION_DATA_FORMAT " " REGION_DATA_FORMAT " "
 231       REGION_DATA_FORMAT " " REGION_IDX_FORMAT " %d",
 232       i, p2i(c->data_location()), dci, p2i(c->destination()),
 233       c->partial_obj_size(), c->live_obj_size(),
 234       c->data_size(), c->source_region(), c->destination_count());
 235 
 236 #undef  REGION_IDX_FORMAT
 237 #undef  REGION_DATA_FORMAT
 238 }
 239 
 240 void
 241 print_generic_summary_data(ParallelCompactData& summary_data,
 242                            HeapWord* const beg_addr,
 243                            HeapWord* const end_addr)
 244 {
 245   size_t total_words = 0;
 246   size_t i = summary_data.addr_to_region_idx(beg_addr);
 247   const size_t last = summary_data.addr_to_region_idx(end_addr);
 248   HeapWord* pdest = 0;
 249 
 250   while (i <= last) {
 251     ParallelCompactData::RegionData* c = summary_data.region(i);
 252     if (c->data_size() != 0 || c->destination() != pdest) {
 253       print_generic_summary_region(i, c);
 254       total_words += c->data_size();
 255       pdest = c->destination();
 256     }
 257     ++i;
 258   }
 259 
 260   log_develop(gc, compaction, phases)("summary_data_bytes=" SIZE_FORMAT, total_words * HeapWordSize);
 261 }
 262 
 263 void
 264 print_generic_summary_data(ParallelCompactData& summary_data,
 265                            SpaceInfo* space_info)
 266 {
 267   if (!log_is_enabled(Develop, gc, compaction, phases)) {
 268     return;
 269   }
 270 
 271   for (unsigned int id = 0; id < PSParallelCompact::last_space_id; ++id) {
 272     const MutableSpace* space = space_info[id].space();
 273     print_generic_summary_data(summary_data, space->bottom(),
 274                                MAX2(space->top(), space_info[id].new_top()));
 275   }
 276 }
 277 
 278 void














 279 print_initial_summary_data(ParallelCompactData& summary_data,
 280                            const MutableSpace* space) {
 281   if (space->top() == space->bottom()) {
 282     return;
 283   }
 284 
 285   const size_t region_size = ParallelCompactData::RegionSize;
 286   typedef ParallelCompactData::RegionData RegionData;
 287   HeapWord* const top_aligned_up = summary_data.region_align_up(space->top());
 288   const size_t end_region = summary_data.addr_to_region_idx(top_aligned_up);
 289   const RegionData* c = summary_data.region(end_region - 1);
 290   HeapWord* end_addr = c->destination() + c->data_size();
 291   const size_t live_in_space = pointer_delta(end_addr, space->bottom());
 292 
 293   // Print (and count) the full regions at the beginning of the space.
 294   size_t full_region_count = 0;
 295   size_t i = summary_data.addr_to_region_idx(space->bottom());
 296   while (i < end_region && summary_data.region(i)->data_size() == region_size) {
 297     ParallelCompactData::RegionData* c = summary_data.region(i);
 298     log_develop(gc, compaction, phases)(
 299         SIZE_FORMAT_W(5) " " PTR_FORMAT " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d",
 300         i, p2i(c->destination()),
 301         c->partial_obj_size(), c->live_obj_size(),
 302         c->data_size(), c->source_region(), c->destination_count());
 303     ++full_region_count;
 304     ++i;
 305   }
 306 
 307   size_t live_to_right = live_in_space - full_region_count * region_size;
 308 
 309   double max_reclaimed_ratio = 0.0;
 310   size_t max_reclaimed_ratio_region = 0;
 311   size_t max_dead_to_right = 0;
 312   size_t max_live_to_right = 0;
 313 
 314   // Print the 'reclaimed ratio' for regions while there is something live in
 315   // the region or to the right of it.  The remaining regions are empty (and
 316   // uninteresting), and computing the ratio will result in division by 0.
 317   while (i < end_region && live_to_right > 0) {
 318     c = summary_data.region(i);
 319     HeapWord* const region_addr = summary_data.region_to_addr(i);
 320     const size_t used_to_right = pointer_delta(space->top(), region_addr);
 321     const size_t dead_to_right = used_to_right - live_to_right;
 322     const double reclaimed_ratio = double(dead_to_right) / live_to_right;
 323 
 324     if (reclaimed_ratio > max_reclaimed_ratio) {
 325       max_reclaimed_ratio = reclaimed_ratio;
 326       max_reclaimed_ratio_region = i;
 327       max_dead_to_right = dead_to_right;
 328       max_live_to_right = live_to_right;
 329     }
 330 
 331     ParallelCompactData::RegionData* c = summary_data.region(i);
 332     log_develop(gc, compaction, phases)(
 333         SIZE_FORMAT_W(5) " " PTR_FORMAT " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d"
 334         " %12.10f " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10),
 335         i, p2i(c->destination()),
 336         c->partial_obj_size(), c->live_obj_size(),
 337         c->data_size(), c->source_region(), c->destination_count(),
 338         reclaimed_ratio, dead_to_right, live_to_right);
 339 
 340 
 341     live_to_right -= c->data_size();
 342     ++i;
 343   }
 344 
 345   // Any remaining regions are empty.  Print one more if there is one.
 346   if (i < end_region) {
 347     ParallelCompactData::RegionData* c = summary_data.region(i);
 348     log_develop(gc, compaction, phases)(
 349         SIZE_FORMAT_W(5) " " PTR_FORMAT " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d",
 350          i, p2i(c->destination()),
 351          c->partial_obj_size(), c->live_obj_size(),
 352          c->data_size(), c->source_region(), c->destination_count());
 353   }
 354 
 355   log_develop(gc, compaction, phases)("max:  " SIZE_FORMAT_W(4) " d2r=" SIZE_FORMAT_W(10) " l2r=" SIZE_FORMAT_W(10) " max_ratio=%14.12f",
 356                                             max_reclaimed_ratio_region, max_dead_to_right, max_live_to_right, max_reclaimed_ratio);


 357 }
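// Worked example for the reclaimed ratio above (illustrative numbers, not
// from the original source): if 8M words of the space are used at and to the
// right of a region and 2M of those words are live, then dead_to_right is
// 6M and the ratio is 6M / 2M = 3.0.  The region where the ratio peaks is
// the one reported on the "max:" line.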
 358 
 359 void
 360 print_initial_summary_data(ParallelCompactData& summary_data,
 361                            SpaceInfo* space_info) {
 362   if (!log_is_enabled(Develop, gc, compaction, phases)) {
 363     return;
 364   }
 365 
 366   unsigned int id = PSParallelCompact::old_space_id;
 367   const MutableSpace* space;
 368   do {
 369     space = space_info[id].space();
 370     print_initial_summary_data(summary_data, space);
 371   } while (++id < PSParallelCompact::eden_space_id);
 372 
 373   do {
 374     space = space_info[id].space();
 375     print_generic_summary_data(summary_data, space->bottom(), space->top());
 376   } while (++id < PSParallelCompact::last_space_id);
 377 }
 378 #endif  // #ifndef PRODUCT
 379 
 380 #ifdef  ASSERT
 381 size_t add_obj_count;
 382 size_t add_obj_size;
 383 size_t mark_bitmap_count;
 384 size_t mark_bitmap_size;
 385 #endif  // #ifdef ASSERT


 603     // implies a region must be filled).
 604     //
 605     // An alternative to the simple loop below:  clear during post_compact(),
 606     // which uses memcpy instead of individual stores, and is easy to
 607     // parallelize.  (The downside is that it clears the entire RegionData
 608     // object as opposed to just one field.)
 609     //
 610     // post_compact() would have to clear the summary data up to the highest
 611     // address that was written during the summary phase, which would be
 612     //
 613     //         max(top, max(new_top, clear_top))
 614     //
 615     // where clear_top is a new field in SpaceInfo.  Would have to set clear_top
 616     // to target_end.
 617     const RegionData* const sr = region(split_region);
 618     const size_t beg_idx =
 619       addr_to_region_idx(region_align_up(sr->destination() +
 620                                          sr->partial_obj_size()));
 621     const size_t end_idx = addr_to_region_idx(target_end);
 622 
 623     log_develop(gc, compaction, phases)("split:  clearing source_region field in [" SIZE_FORMAT ", " SIZE_FORMAT ")", beg_idx, end_idx);




 624     for (size_t idx = beg_idx; idx < end_idx; ++idx) {
 625       _region_data[idx].set_source_region(0);
 626     }
 627 
 628     // Set split_destination and partial_obj_size to reflect the split region.
 629     split_destination = sr->destination();
 630     partial_obj_size = sr->partial_obj_size();
 631   }
 632 
 633   // The split is recorded only if a partial object extends onto the region.
 634   if (partial_obj_size != 0) {
 635     _region_data[split_region].set_partial_obj_size(0);
 636     split_info.record(split_region, partial_obj_size, split_destination);
 637   }
 638 
 639   // Setup the continuation addresses.
 640   *target_next = split_destination + partial_obj_size;
 641   HeapWord* const source_next = region_to_addr(split_region) + partial_obj_size;
 642 
 643   if (log_is_enabled(Develop, gc, compaction, phases)) {
 644     const char * split_type = partial_obj_size == 0 ? "easy" : "hard";
 645     log_develop(gc, compaction, phases)("%s split:  src=" PTR_FORMAT " src_c=" SIZE_FORMAT " pos=" SIZE_FORMAT,
 646                                         split_type, p2i(source_next), split_region, partial_obj_size);
 647     log_develop(gc, compaction, phases)("%s split:  dst=" PTR_FORMAT " dst_c=" SIZE_FORMAT " tn=" PTR_FORMAT,



 648                                        split_type, p2i(split_destination),
 649                                        addr_to_region_idx(split_destination),
 650                                        p2i(*target_next));
 651 
 652     if (partial_obj_size != 0) {
 653       HeapWord* const po_beg = split_info.destination();
 654       HeapWord* const po_end = po_beg + split_info.partial_obj_size();
 655       log_develop(gc, compaction, phases)("%s split:  po_beg=" PTR_FORMAT " " SIZE_FORMAT " po_end=" PTR_FORMAT " " SIZE_FORMAT,


 656                                           split_type,
 657                                           p2i(po_beg), addr_to_region_idx(po_beg),
 658                                           p2i(po_end), addr_to_region_idx(po_end));
 659     }
 660   }
 661 
 662   return source_next;
 663 }
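// Illustrative note (an interpretation of the code above, not part of the
// original source): a "split" occurs when the target space fills up while
// summarizing.  Regions summarized so far keep their recorded destinations;
// the address returned via source_next tells the caller where to resume, so
// the remainder of the source space can be summarized into a different
// target (later in this file, the space itself).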
 664 
 665 bool ParallelCompactData::summarize(SplitInfo& split_info,
 666                                     HeapWord* source_beg, HeapWord* source_end,
 667                                     HeapWord** source_next,
 668                                     HeapWord* target_beg, HeapWord* target_end,
 669                                     HeapWord** target_next)
 670 {

 671   HeapWord* const source_next_val = source_next == NULL ? NULL : *source_next;
 672   log_develop(gc, compaction, phases)(
 673       "sb=" PTR_FORMAT " se=" PTR_FORMAT " sn=" PTR_FORMAT
 674       " tb=" PTR_FORMAT " te=" PTR_FORMAT " tn=" PTR_FORMAT,
 675       p2i(source_beg), p2i(source_end), p2i(source_next_val),
 676       p2i(target_beg), p2i(target_end), p2i(*target_next));

 677 
 678   size_t cur_region = addr_to_region_idx(source_beg);
 679   const size_t end_region = addr_to_region_idx(region_align_up(source_end));
 680 
 681   HeapWord *dest_addr = target_beg;
 682   while (cur_region < end_region) {
 683     // The destination must be set even if the region has no data.
 684     _region_data[cur_region].set_destination(dest_addr);
 685 
 686     size_t words = _region_data[cur_region].data_size();
 687     if (words > 0) {
 688       // If cur_region does not fit entirely into the target space, find a point
 689       // at which the source space can be 'split' so that part is copied to the
 690       // target space and the rest is copied elsewhere.
 691       if (dest_addr + words > target_end) {
 692         assert(source_next != NULL, "source_next is NULL when splitting");
 693         *source_next = summarize_split_space(cur_region, split_info, dest_addr,
 694                                              target_end, target_next);
 695         return false;
 696       }


 887   PSYoungGen* young_gen = heap->young_gen();
 888 
 889   _space_info[old_space_id].set_space(heap->old_gen()->object_space());
 890   _space_info[eden_space_id].set_space(young_gen->eden_space());
 891   _space_info[from_space_id].set_space(young_gen->from_space());
 892   _space_info[to_space_id].set_space(young_gen->to_space());
 893 
 894   _space_info[old_space_id].set_start_array(heap->old_gen()->start_array());
 895 }
 896 
 897 void PSParallelCompact::initialize_dead_wood_limiter()
 898 {
 899   const size_t max = 100;
 900   _dwl_mean = double(MIN2(ParallelOldDeadWoodLimiterMean, max)) / 100.0;
 901   _dwl_std_dev = double(MIN2(ParallelOldDeadWoodLimiterStdDev, max)) / 100.0;
 902   _dwl_first_term = 1.0 / (sqrt(2.0 * M_PI) * _dwl_std_dev);
 903   DEBUG_ONLY(_dwl_initialized = true;)
 904   _dwl_adjustment = normal_distribution(1.0);
 905 }
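// Illustrative note (assuming normal_distribution() evaluates a Gaussian
// density, which is not shown in this excerpt): the dead wood limiter uses
//
//   f(x) = 1 / (sigma * sqrt(2 * pi)) * exp(-(x - mu)^2 / (2 * sigma^2))
//
// with mu = _dwl_mean and sigma = _dwl_std_dev (the percentages above,
// converted to fractions).  _dwl_first_term caches the
// 1 / (sigma * sqrt(2 * pi)) factor and _dwl_adjustment caches f(1.0).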
 906 


























 907 void
 908 PSParallelCompact::clear_data_covering_space(SpaceId id)
 909 {
 910   // At this point, top is the value before GC, new_top() is the value that will
 911   // be set at the end of GC.  The marking bitmap is cleared to top; nothing
 912   // should be marked above top.  The summary data is cleared to the larger of
 913   // top & new_top.
 914   MutableSpace* const space = _space_info[id].space();
 915   HeapWord* const bot = space->bottom();
 916   HeapWord* const top = space->top();
 917   HeapWord* const max_top = MAX2(top, _space_info[id].new_top());
 918 
 919   const idx_t beg_bit = _mark_bitmap.addr_to_bit(bot);
 920   const idx_t end_bit = BitMap::word_align_up(_mark_bitmap.addr_to_bit(top));
 921   _mark_bitmap.clear_range(beg_bit, end_bit);
 922 
 923   const size_t beg_region = _summary_data.addr_to_region_idx(bot);
 924   const size_t end_region =
 925     _summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top));
 926   _summary_data.clear_range(beg_region, end_region);
 927 
 928   // Clear the data used to 'split' regions.
 929   SplitInfo& split_info = _space_info[id].split_info();
 930   if (split_info.is_valid()) {
 931     split_info.clear();
 932   }
 933   DEBUG_ONLY(split_info.verify_clear();)
 934 }
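// Illustrative note (made-up numbers, not part of the original source): with
// RegionSize = 64K words, a space whose max_top lies 100K words above bottom
// has its summary data cleared for two regions, because region_align_up
// rounds 100K words up to 128K.  The mark bitmap, by contrast, is cleared
// only up to top, since nothing is ever marked above it.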
 935 
 936 void PSParallelCompact::pre_compact()
 937 {
 938   // Update the from & to space pointers in space_info, since they are swapped
 939   // at each young gen gc.  Do the update unconditionally (even though a
 940   // promotion failure does not swap spaces) because an unknown number of young
 941   // collections will have swapped the spaces an unknown number of times.
 942   GCTraceTime(Trace, gc, phases) tm("pre compact", &_gc_timer);
 943   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 944   _space_info[from_space_id].set_space(heap->young_gen()->from_space());
 945   _space_info[to_space_id].set_space(heap->young_gen()->to_space());
 946 


 947   DEBUG_ONLY(add_obj_count = add_obj_size = 0;)
 948   DEBUG_ONLY(mark_bitmap_count = mark_bitmap_size = 0;)
 949 
 950   // Increment the invocation count
 951   heap->increment_total_collections(true);
 952 
 953   // We need to track unique mark sweep invocations as well.
 954   _total_invocations++;
 955 
 956   heap->print_heap_before_gc();
 957   heap->trace_heap_before_gc(&_gc_tracer);
 958 
 959   // Fill in TLABs
 960   heap->accumulate_statistics_all_tlabs();
 961   heap->ensure_parsability(true);  // retire TLABs
 962 
 963   if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
 964     HandleMark hm;  // Discard invalid handles created during verification
 965     Universe::verify("Before GC");
 966   }
 967 
 968   // Verify object start arrays
 969   if (VerifyObjectStartArray &&
 970       VerifyBeforeGC) {
 971     heap->old_gen()->verify_object_start_array();
 972   }
 973 
 974   DEBUG_ONLY(mark_bitmap()->verify_clear();)
 975   DEBUG_ONLY(summary_data().verify_clear();)
 976 
 977   // Have worker threads release resources the next time they run a task.
 978   gc_task_manager()->release_all_resources();
 979 }
 980 
 981 void PSParallelCompact::post_compact()
 982 {
 983   GCTraceTime(Trace, gc, phases) tm("post compact", &_gc_timer);
 984 
 985   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
 986     // Clear the marking bitmap, summary data and split info.
 987     clear_data_covering_space(SpaceId(id));
 988     // Update top().  Must be done after clearing the bitmap and summary data.
 989     _space_info[id].publish_new_top();
 990   }
 991 
 992   MutableSpace* const eden_space = _space_info[eden_space_id].space();
 993   MutableSpace* const from_space = _space_info[from_space_id].space();
 994   MutableSpace* const to_space   = _space_info[to_space_id].space();
 995 
 996   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 997   bool eden_empty = eden_space->is_empty();
 998   if (!eden_empty) {
 999     eden_empty = absorb_live_data_from_eden(heap->size_policy(),
1000                                             heap->young_gen(), heap->old_gen());
1001   }
1002 
1003   // Update heap occupancy information which is used as input to the soft ref


1517     // Recompute the summary data, taking into account the dense prefix.  If
1518     // every last byte will be reclaimed, then the existing summary data which
1519     // compacts everything can be left in place.
1520     if (!maximum_compaction && dense_prefix_end != space->bottom()) {
1521       // If dead space crosses the dense prefix boundary, it is (at least
1522       // partially) filled with a dummy object, marked live and added to the
1523       // summary data.  This simplifies the copy/update phase and must be done
1524       // before the final locations of objects are determined, to prevent
1525       // leaving a fragment of dead space that is too small to fill.
1526       fill_dense_prefix_end(id);
1527 
1528       // Compute the destination of each Region, and thus each object.
1529       _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end);
1530       _summary_data.summarize(_space_info[id].split_info(),
1531                               dense_prefix_end, space->top(), NULL,
1532                               dense_prefix_end, space->end(),
1533                               _space_info[id].new_top_addr());
1534     }
1535   }
1536 
1537   if (log_is_enabled(Develop, gc, compaction, phases)) {
1538     const size_t region_size = ParallelCompactData::RegionSize;
1539     HeapWord* const dense_prefix_end = _space_info[id].dense_prefix();
1540     const size_t dp_region = _summary_data.addr_to_region_idx(dense_prefix_end);
1541     const size_t dp_words = pointer_delta(dense_prefix_end, space->bottom());
1542     HeapWord* const new_top = _space_info[id].new_top();
1543     const HeapWord* nt_aligned_up = _summary_data.region_align_up(new_top);
1544     const size_t cr_words = pointer_delta(nt_aligned_up, dense_prefix_end);
1545     log_develop(gc, compaction, phases)(
1546         "id=%d cap=" SIZE_FORMAT " dp=" PTR_FORMAT " "
1547         "dp_region=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " "
1548         "cr_count=" SIZE_FORMAT " " "nt=" PTR_FORMAT,
1549         id, space->capacity_in_words(), p2i(dense_prefix_end),
1550         dp_region, dp_words / region_size,
1551         cr_words / region_size, p2i(new_top));
1552   }
1553 }
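// Illustrative note (an interpretation, not part of the original source): the
// fill_dense_prefix_end() call above matters when a run of dead space
// straddles the dense prefix boundary.  The prefix itself is not compacted,
// so the dead words on its side of the boundary are covered with a dummy
// object; this keeps the prefix parsable and, as the comment above notes,
// avoids leaving a fragment of dead space too small to fill later.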
1554 
1555 #ifndef PRODUCT
1556 void PSParallelCompact::summary_phase_msg(SpaceId dst_space_id,
1557                                           HeapWord* dst_beg, HeapWord* dst_end,
1558                                           SpaceId src_space_id,
1559                                           HeapWord* src_beg, HeapWord* src_end)
1560 {
1561   log_develop(gc, compaction, phases)(
1562       "Summarizing %d [%s] into %d [%s]:  "
1563       "src=" PTR_FORMAT "-" PTR_FORMAT " "
1564       SIZE_FORMAT "-" SIZE_FORMAT " "
1565       "dst=" PTR_FORMAT "-" PTR_FORMAT " "
1566       SIZE_FORMAT "-" SIZE_FORMAT,
1567       src_space_id, space_names[src_space_id],
1568       dst_space_id, space_names[dst_space_id],
1569       p2i(src_beg), p2i(src_end),
1570       _summary_data.addr_to_region_idx(src_beg),
1571       _summary_data.addr_to_region_idx(src_end),
1572       p2i(dst_beg), p2i(dst_end),
1573       _summary_data.addr_to_region_idx(dst_beg),
1574       _summary_data.addr_to_region_idx(dst_end));

1575 }
1576 #endif  // #ifndef PRODUCT
1577 
1578 void PSParallelCompact::summary_phase(ParCompactionManager* cm,
1579                                       bool maximum_compaction)
1580 {
1581   GCTraceTime(Trace, gc, phases) tm("summary phase", &_gc_timer);

1582 
1583 #ifdef  ASSERT
1584   if (TraceParallelOldGCMarkingPhase) {
1585     tty->print_cr("add_obj_count=" SIZE_FORMAT " "
1586                   "add_obj_bytes=" SIZE_FORMAT,
1587                   add_obj_count, add_obj_size * HeapWordSize);
1588     tty->print_cr("mark_bitmap_count=" SIZE_FORMAT " "
1589                   "mark_bitmap_bytes=" SIZE_FORMAT,
1590                   mark_bitmap_count, mark_bitmap_size * HeapWordSize);
1591   }
1592 #endif  // #ifdef ASSERT
1593 
1594   // Quick summarization of each space into itself, to see how much is live.
1595   summarize_spaces_quick();
1596 
1597   log_develop(gc, compaction, phases)("Summary_phase:  after summarizing each space to self");


1598   NOT_PRODUCT(print_region_ranges());

1599   NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info));


1600 
1601   // The amount of live data that will end up in old space (assuming it fits).
1602   size_t old_space_total_live = 0;
1603   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
1604     old_space_total_live += pointer_delta(_space_info[id].new_top(),
1605                                           _space_info[id].space()->bottom());
1606   }
1607 
1608   MutableSpace* const old_space = _space_info[old_space_id].space();
1609   const size_t old_capacity = old_space->capacity_in_words();
1610   if (old_space_total_live > old_capacity) {
1611     // XXX - should also try to expand
1612     maximum_compaction = true;
1613   }
1614 
1615   // Old generations.
1616   summarize_space(old_space_id, maximum_compaction);
1617 
1618   // Summarize the remaining spaces in the young gen.  The initial target space
1619   // is the old gen.  If a space does not fit entirely into the target, then the


1653       assert(next_src_addr != NULL, "sanity");
1654 
1655       // The source space becomes the new target, so the remainder is compacted
1656       // within the space itself.
1657       dst_space_id = SpaceId(id);
1658       dst_space_end = space->end();
1659       new_top_addr = _space_info[id].new_top_addr();
1660       NOT_PRODUCT(summary_phase_msg(dst_space_id,
1661                                     space->bottom(), dst_space_end,
1662                                     SpaceId(id), next_src_addr, space->top());)
1663       done = _summary_data.summarize(_space_info[id].split_info(),
1664                                      next_src_addr, space->top(),
1665                                      NULL,
1666                                      space->bottom(), dst_space_end,
1667                                      new_top_addr);
1668       assert(done, "space must fit when compacted into itself");
1669       assert(*new_top_addr <= space->top(), "usage should not grow");
1670     }
1671   }
1672 
1673   log_develop(gc, compaction, phases)("Summary_phase:  after final summarization");


1674   NOT_PRODUCT(print_region_ranges());
1675   NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info));



1676 }
1677 
1678 // This method should contain all heap-specific policy for invoking a full
1679 // collection.  invoke_no_policy() will only attempt to compact the heap; it
1680 // will do nothing further.  If we need to bail out for policy reasons, scavenge
1681 // before full gc, or any other specialized behavior, it needs to be added here.
1682 //
1683 // Note that this method should only be called from the vm_thread while at a
1684 // safepoint.
1685 //
1686 // Note that the all_soft_refs_clear flag in the collector policy
1687 // may be true because this method can be called without intervening
1688 // activity.  For example, when the heap space is tight and full measures
1689 // are being taken to free space.
1690 void PSParallelCompact::invoke(bool maximum_heap_compaction) {
1691   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1692   assert(Thread::current() == (Thread*)VMThread::vm_thread(),
1693          "should be in vm thread");
1694 
1695   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();


1730   TimeStamp compaction_start;
1731   TimeStamp collection_exit;
1732 
1733   GCCause::Cause gc_cause = heap->gc_cause();
1734   PSYoungGen* young_gen = heap->young_gen();
1735   PSOldGen* old_gen = heap->old_gen();
1736   PSAdaptiveSizePolicy* size_policy = heap->size_policy();
1737 
1738   // The scope of casr should end after code that can change
1739   // CollectorPolicy::_should_clear_all_soft_refs.
1740   ClearedAllSoftRefs casr(maximum_heap_compaction,
1741                           heap->collector_policy());
1742 
1743   if (ZapUnusedHeapArea) {
1744     // Save information needed to minimize mangling
1745     heap->record_gen_tops_before_GC();
1746   }
1747 
1748   heap->pre_full_gc_dump(&_gc_timer);
1749 


1750   // Make sure data structures are sane, make the heap parsable, and do other
1751   // miscellaneous bookkeeping.
1752   pre_compact();
1753 
1754   PreGCValues pre_gc_values(heap);
1755 
1756   // Get the compaction manager reserved for the VM thread.
1757   ParCompactionManager* const vmthread_cm =
1758     ParCompactionManager::manager_array(gc_task_manager()->workers());
1759 



1760   {
1761     ResourceMark rm;
1762     HandleMark hm;
1763 
1764     // Set the number of GC threads to be used in this collection
1765     gc_task_manager()->set_active_gang();
1766     gc_task_manager()->task_idle_workers();
1767 
1768     GCTraceCPUTime tcpu;
1769     GCTraceTime(Info, gc) tm("Full GC", NULL, gc_cause, true);
1770     TraceCollectorStats tcs(counters());
1771     TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);
1772 
1773     if (TraceOldGenTime) accumulated_time()->start();
1774 
1775     // Let the size policy know we're starting
1776     size_policy->major_collection_begin();
1777 
1778     CodeCache::gc_prologue();
1779 
1780 #if defined(COMPILER2) || INCLUDE_JVMCI
1781     DerivedPointerTable::clear();
1782 #endif
1783 
1784     ref_processor()->enable_discovery();
1785     ref_processor()->setup_policy(maximum_heap_compaction);
1786 
1787     bool marked_for_unloading = false;
1788 
1789     marking_start.update();


1796 #if defined(COMPILER2) || INCLUDE_JVMCI
1797     assert(DerivedPointerTable::is_active(), "Sanity");
1798     DerivedPointerTable::set_active(false);
1799 #endif
1800 
1801     // adjust_roots() updates Universe::_intArrayKlassObj which is
1802     // needed by the compaction for filling holes in the dense prefix.
1803     adjust_roots();
1804 
1805     compaction_start.update();
1806     compact();
1807 
1808     // Reset the mark bitmap, summary data, and do other bookkeeping.  Must be
1809     // done before resizing.
1810     post_compact();
1811 
1812     // Let the size policy know we're done
1813     size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);
1814 
1815     if (UseAdaptiveSizePolicy) {
1816       log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
1817       log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,






1818                           old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());


1819 
1820       // Don't check if the size_policy is ready here.  Let
1821       // the size_policy check that internally.
1822       if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
1823           AdaptiveSizePolicy::should_update_promo_stats(gc_cause)) {
1824         // Swap the survivor spaces if from_space is empty. The
1825         // resize_young_gen() called below is normally used after
1826         // a successful young GC and swapping of survivor spaces;
1827         // otherwise, it will fail to resize the young gen with
1828         // the current implementation.
1829         if (young_gen->from_space()->is_empty()) {
1830           young_gen->from_space()->clear(SpaceDecorator::Mangle);
1831           young_gen->swap_spaces();
1832         }
1833 
1834         // Calculate optimal free space amounts
1835         assert(young_gen->max_size() >
1836           young_gen->from_space()->capacity_in_bytes() +
1837           young_gen->to_space()->capacity_in_bytes(),
1838           "Sizes of space in young gen are out-of-bounds");


1856                                                     max_old_gen_size,
1857                                                     max_eden_size,
1858                                                     true /* full gc*/);
1859 
1860         size_policy->check_gc_overhead_limit(young_live,
1861                                              eden_live,
1862                                              max_old_gen_size,
1863                                              max_eden_size,
1864                                              true /* full gc*/,
1865                                              gc_cause,
1866                                              heap->collector_policy());
1867 
1868         size_policy->decay_supplemental_growth(true /* full gc*/);
1869 
1870         heap->resize_old_gen(
1871           size_policy->calculated_old_free_size_in_bytes());
1872 
1873         heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
1874                                size_policy->calculated_survivor_size_in_bytes());
1875       }
1876 
1877       log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());


1878     }
1879 
1880     if (UsePerfData) {
1881       PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
1882       counters->update_counters();
1883       counters->update_old_capacity(old_gen->capacity_in_bytes());
1884       counters->update_young_capacity(young_gen->capacity_in_bytes());
1885     }
1886 
1887     heap->resize_all_tlabs();
1888 
1889     // Resize the metaspace capacity after a collection
1890     MetaspaceGC::compute_new_size();
1891 
1892     if (TraceOldGenTime) {
1893       accumulated_time()->stop();
1894     }
1895 



1896     young_gen->print_used_change(pre_gc_values.young_gen_used());
1897     old_gen->print_used_change(pre_gc_values.old_gen_used());

1898     MetaspaceAux::print_metaspace_change(pre_gc_values.metadata_used());




1899 
1900     // Track memory usage and detect low memory
1901     MemoryService::track_memory_usage();
1902     heap->update_counters();
1903     gc_task_manager()->release_idle_workers();
1904   }
1905 
1906 #ifdef ASSERT
1907   for (size_t i = 0; i < ParallelGCThreads + 1; ++i) {
1908     ParCompactionManager* const cm =
1909       ParCompactionManager::manager_array(int(i));
1910     assert(cm->marking_stack()->is_empty(),       "should be empty");
1911     assert(ParCompactionManager::region_list(int(i))->is_empty(), "should be empty");
1912   }
1913 #endif // ASSERT
1914 
1915   if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
1916     HandleMark hm;  // Discard invalid handles created during verification
1917     Universe::verify("After GC");
1918   }
1919 
1920   // Re-verify object start arrays
1921   if (VerifyObjectStartArray &&
1922       VerifyAfterGC) {
1923     old_gen->verify_object_start_array();
1924   }
1925 
1926   if (ZapUnusedHeapArea) {
1927     old_gen->object_space()->check_mangled_unused_area_complete();
1928   }
1929 
1930   NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
1931 
1932   collection_exit.update();
1933 
1934   heap->print_heap_after_gc();
1935   heap->trace_heap_after_gc(&_gc_tracer);
1936 
1937   log_debug(gc, task, time)("VM-Thread " JLONG_FORMAT " " JLONG_FORMAT " " JLONG_FORMAT,


1938                          marking_start.ticks(), compaction_start.ticks(),
1939                          collection_exit.ticks());
1940   gc_task_manager()->print_task_time_stamps();

1941 
1942   heap->post_full_gc_dump(&_gc_timer);
1943 
1944 #ifdef TRACESPINNING
1945   ParallelTaskTerminator::print_termination_counts();
1946 #endif
1947 
1948   AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());
1949 
1950   _gc_timer.register_gc_end();
1951 
1952   _gc_tracer.report_dense_prefix(dense_prefix(old_space_id));
1953   _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());
1954 
1955   return true;
1956 }
1957 
1958 bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
1959                                              PSYoungGen* young_gen,
1960                                              PSOldGen* old_gen) {
1961   MutableSpace* const eden_space = young_gen->eden_space();
1962   assert(!eden_space->is_empty(), "eden must be non-empty");
1963   assert(young_gen->virtual_space()->alignment() ==
1964          old_gen->virtual_space()->alignment(), "alignments do not match");
1965 
1966   if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
1967     return false;
1968   }
1969 


1976   }
1977 
1978   // Figure out how much to take from eden.  Include the average amount promoted
1979   // in the total; otherwise the next young gen GC will simply bail out to a
1980   // full GC.
1981   const size_t alignment = old_gen->virtual_space()->alignment();
1982   const size_t eden_used = eden_space->used_in_bytes();
1983   const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
1984   const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
1985   const size_t eden_capacity = eden_space->capacity_in_bytes();
1986 
1987   if (absorb_size >= eden_capacity) {
1988     return false; // Must leave some space in eden.
1989   }
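// Illustrative note (made-up numbers, not part of the original source): with
// a 512K alignment, eden_used = 30M and an average promoted size of 4.25M
// give absorb_size = align_size_up(34.25M, 512K) = 34.5M.  If eden_capacity
// is 40M the absorb can proceed; if it were 34M or less, the check above
// would return false to keep some space in eden.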
1990 
1991   const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
1992   if (new_young_size < young_gen->min_gen_size()) {
1993     return false; // Respect young gen minimum size.
1994   }
1995 
1996   log_trace(heap, ergo)(" absorbing " SIZE_FORMAT "K:  "

1997                         "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
1998                         "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
1999                         "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
2000                         absorb_size / K,
2001                         eden_capacity / K, (eden_capacity - absorb_size) / K,
2002                         young_gen->from_space()->used_in_bytes() / K,
2003                         young_gen->to_space()->used_in_bytes() / K,
2004                         young_gen->capacity_in_bytes() / K, new_young_size / K);

2005 
2006   // Fill the unused part of the old gen.
2007   MutableSpace* const old_space = old_gen->object_space();
2008   HeapWord* const unused_start = old_space->top();
2009   size_t const unused_words = pointer_delta(old_space->end(), unused_start);
2010 
2011   if (unused_words > 0) {
2012     if (unused_words < CollectedHeap::min_fill_size()) {
2013       return false;  // If the old gen cannot be filled, must give up.
2014     }
2015     CollectedHeap::fill_with_objects(unused_start, unused_words);
2016   }
2017 
2018   // Take the live data from eden and set both top and end in the old gen to
2019   // eden top.  (Need to set end because reset_after_change() mangles the region
2020   // from end to virtual_space->high() in debug builds).
2021   HeapWord* const new_top = eden_space->top();
2022   old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
2023                                         absorb_size);
2024   young_gen->reset_after_change();


2034 
2035   // Could update the promoted average here, but it is not typically updated at
2036   // full GCs and the value to use is unclear.  Something like
2037   //
2038   // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.
2039 
2040   size_policy->set_bytes_absorbed_from_eden(absorb_size);
2041   return true;
2042 }
2043 
2044 GCTaskManager* const PSParallelCompact::gc_task_manager() {
2045   assert(ParallelScavengeHeap::gc_task_manager() != NULL,
2046     "shouldn't return NULL");
2047   return ParallelScavengeHeap::gc_task_manager();
2048 }
2049 
2050 void PSParallelCompact::marking_phase(ParCompactionManager* cm,
2051                                       bool maximum_heap_compaction,
2052                                       ParallelOldTracer *gc_tracer) {
2053   // Recursively traverse all live objects and mark them
2054   GCTraceTime(Trace, gc, phases) tm("marking phase", &_gc_timer);
2055 
2056   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
2057   uint parallel_gc_threads = heap->gc_task_manager()->workers();
2058   uint active_gc_threads = heap->gc_task_manager()->active_workers();
2059   TaskQueueSetSuper* qset = ParCompactionManager::region_array();
2060   ParallelTaskTerminator terminator(active_gc_threads, qset);
2061 
2062   ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
2063   ParCompactionManager::FollowStackClosure follow_stack_closure(cm);
2064 
2065   // Need new claim bits before marking starts.
2066   ClassLoaderDataGraph::clear_claimed_marks();
2067 
2068   {
2069     GCTraceTime(Trace, gc, phases) tm("par mark", &_gc_timer);
2070 
2071     ParallelScavengeHeap::ParStrongRootsScope psrs;
2072 
2073     GCTaskQueue* q = GCTaskQueue::create();
2074 
2075     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::universe));
2076     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jni_handles));
2077     // We scan the thread roots in parallel
2078     Threads::create_thread_roots_marking_tasks(q);
2079     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::object_synchronizer));
2080     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::flat_profiler));
2081     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::management));
2082     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary));
2083     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::class_loader_data));
2084     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti));
2085     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::code_cache));
2086 
2087     if (active_gc_threads > 1) {
2088       for (uint j = 0; j < active_gc_threads; j++) {
2089         q->enqueue(new StealMarkingTask(&terminator));
2090       }
2091     }
2092 
2093     gc_task_manager()->execute_and_wait(q);
2094   }
2095 
2096   // Process reference objects found during marking
2097   {
2098     GCTraceTime(Trace, gc, phases) tm("reference processing", &_gc_timer);
2099 
2100     ReferenceProcessorStats stats;
2101     if (ref_processor()->processing_is_mt()) {
2102       RefProcTaskExecutor task_executor;
2103       stats = ref_processor()->process_discovered_references(
2104         is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
2105         &task_executor, &_gc_timer);
2106     } else {
2107       stats = ref_processor()->process_discovered_references(
2108         is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL,
2109         &_gc_timer);
2110     }
2111 
2112     gc_tracer->report_gc_reference_stats(stats);
2113   }
2114 
2115   GCTraceTime(Trace, gc) tm_m("class unloading", &_gc_timer);
2116 
2117   // This is the point where the entire marking should have completed.
2118   assert(cm->marking_stacks_empty(), "Marking should have completed");
2119 
2120   // Follow system dictionary roots and unload classes.
2121   bool purged_class = SystemDictionary::do_unloading(is_alive_closure());
2122 
2123   // Unload nmethods.
2124   CodeCache::do_unloading(is_alive_closure(), purged_class);
2125 
2126   // Prune dead klasses from subklass/sibling/implementor lists.
2127   Klass::clean_weak_klass_links(is_alive_closure());
2128 
2129   // Delete entries for dead interned strings.
2130   StringTable::unlink(is_alive_closure());
2131 
2132   // Clean up unreferenced symbols in symbol table.
2133   SymbolTable::unlink();
2134   _gc_tracer.report_object_count_after_gc(is_alive_closure());
2135 }
2136 
2137 // This should be moved to the shared markSweep code!
2138 class PSAlwaysTrueClosure: public BoolObjectClosure {
2139 public:
2140   bool do_object_b(oop p) { return true; }
2141 };
2142 static PSAlwaysTrueClosure always_true;
2143 
2144 void PSParallelCompact::adjust_roots() {
2145   // Adjust the pointers to reflect the new locations
2146   GCTraceTime(Trace, gc, phases) tm("adjust roots", &_gc_timer);
2147 
2148   // Need new claim bits when tracing through and adjusting pointers.
2149   ClassLoaderDataGraph::clear_claimed_marks();
2150 
2151   // General strong roots.
2152   Universe::oops_do(adjust_pointer_closure());
2153   JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
2154   CLDToOopClosure adjust_from_cld(adjust_pointer_closure());
2155   Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL);
2156   ObjectSynchronizer::oops_do(adjust_pointer_closure());
2157   FlatProfiler::oops_do(adjust_pointer_closure());
2158   Management::oops_do(adjust_pointer_closure());
2159   JvmtiExport::oops_do(adjust_pointer_closure());
2160   SystemDictionary::oops_do(adjust_pointer_closure());
2161   ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true);
2162 
2163   // Now adjust pointers in remaining weak roots.  (All of which should
2164   // have been cleared if they pointed to non-surviving objects.)
2165   // Global (weak) JNI handles
2166   JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());
2167 
2168   CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
2169   CodeCache::blobs_do(&adjust_from_blobs);
2170   StringTable::oops_do(adjust_pointer_closure());
2171   ref_processor()->weak_oops_do(adjust_pointer_closure());
2172   // Roots were visited so references into the young gen in roots
2173   // may have been scanned.  Process them also.
2174   // Should the reference processor have a span that excludes
2175   // young gen objects?
2176   PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());
2177 }
2178 
2179 // Helper class to print 8 region numbers per line and then print the total at the end.
2180 class FillableRegionLogger : public StackObj {
2181 private:
2182   LogHandle(gc, compaction) log;
2183   static const int LineLength = 8;
2184   size_t _regions[LineLength];
2185   int _next_index;
2186   bool _enabled;
2187   size_t _total_regions;
2188 public:
2189   FillableRegionLogger() : _next_index(0), _total_regions(0), _enabled(log.is_develop()) { }
2190   ~FillableRegionLogger() {
2191     log.develop(SIZE_FORMAT " initially fillable regions", _total_regions);
2192   }
2193 
2194   void print_line() {
2195     if (!_enabled || _next_index == 0) {
2196       return;
2197     }
2198     FormatBuffer<> line("Fillable: ");
2199     for (int i = 0; i < _next_index; i++) {
2200       line.append(" " SIZE_FORMAT_W(7), _regions[i]);
2201     }
2202     log.develop("%s", line.buffer());
2203     _next_index = 0;
2204   }
2205 
2206   void handle(size_t region) {
2207     if (!_enabled) {
2208       return;
2209     }
2210     _regions[_next_index++] = region;
2211     if (_next_index == LineLength) {
2212       print_line();
2213     }
2214     _total_regions++;
2215   }
2216 };
2217 
2218 void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
2219                                                       uint parallel_gc_threads)
2220 {
2221   GCTraceTime(Trace, gc, phases) tm("drain task setup", &_gc_timer);
2222 
2223   // Find the threads that are active
2224   unsigned int which = 0;
2225 
2226   const uint task_count = MAX2(parallel_gc_threads, 1U);
2227   for (uint j = 0; j < task_count; j++) {
2228     q->enqueue(new DrainStacksCompactionTask(j));
2229     ParCompactionManager::verify_region_list_empty(j);
2230     // Set the region stack variables to "no region stack" values
2231     // so that they will be recognized as needing a region stack
2232     // in the stealing tasks if they do not get one by executing
2233     // a draining task.
2234     ParCompactionManager* cm = ParCompactionManager::manager_array(j);
2235     cm->set_region_stack(NULL);
2236     cm->set_region_stack_index((uint)max_uintx);
2237   }
2238   ParCompactionManager::reset_recycled_stack_index();
2239 
2240   // Find all regions that are available (can be filled immediately) and
2241   // distribute them to the thread stacks.  The iteration is done in reverse
2242   // order (high to low) so the regions will be removed in ascending order.
2243 
2244   const ParallelCompactData& sd = PSParallelCompact::summary_data();
2245 

2246   // "which" is an index corresponding to the tasks created above;
2247   // it must satisfy 0 <= which < task_count.
2248 
2249   which = 0;
2250   // id + 1 is used in the termination test so that an unsigned id can be
2251   // used even though old_space_id == 0 (an unsigned id is never below 0).
2252   FillableRegionLogger region_logger;
2253   for (unsigned int id = to_space_id; id + 1 > old_space_id; --id) {
2254     SpaceInfo* const space_info = _space_info + id;
2255     MutableSpace* const space = space_info->space();
2256     HeapWord* const new_top = space_info->new_top();
2257 
2258     const size_t beg_region = sd.addr_to_region_idx(space_info->dense_prefix());
2259     const size_t end_region =
2260       sd.addr_to_region_idx(sd.region_align_up(new_top));
2261 
2262     for (size_t cur = end_region - 1; cur + 1 > beg_region; --cur) {
2263       if (sd.region(cur)->claim_unsafe()) {
2264         ParCompactionManager::region_list_push(which, cur);
2265         region_logger.handle(cur);
2266         // Assign regions to tasks in round-robin fashion.
2267         if (++which == task_count) {
2268           assert(which <= parallel_gc_threads,
2269             "Inconsistent number of workers");
2270           which = 0;
2271         }
2272       }
2273     }
2274     region_logger.print_line();
2275   }
2276 }
2277 
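     // Over-partitioning factor for the dense prefix update tasks created in
     // enqueue_dense_prefix_tasks() below; carving the dense prefix into more
     // chunks than there are gc threads presumably helps balance the load.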
2278 #define PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING 4
2279 
2280 void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
2281                                                     uint parallel_gc_threads) {
2282   GCTraceTime(Trace, gc, phases) tm("dense prefix task setup", &_gc_timer);
2283 
2284   ParallelCompactData& sd = PSParallelCompact::summary_data();
2285 
2286   // Iterate over all the spaces adding tasks for updating
2287   // regions in the dense prefix.  Assume that 1 gc thread
2288   // will work on opening the gaps and the remaining gc threads
2289   // will work on the dense prefix.
2290   unsigned int space_id;
2291   for (space_id = old_space_id; space_id < last_space_id; ++ space_id) {
2292     HeapWord* const dense_prefix_end = _space_info[space_id].dense_prefix();
2293     const MutableSpace* const space = _space_info[space_id].space();
2294 
2295     if (dense_prefix_end == space->bottom()) {
2296       // There is no dense prefix for this space.
2297       continue;
2298     }
2299 
2300     // The dense prefix is before this region.
2301     size_t region_index_end_dense_prefix =
2302         sd.addr_to_region_idx(dense_prefix_end);
2344         q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id),
2345                                              region_index_start,
2346                                              region_index_end));
2347         region_index_start = region_index_end;
2348       }
2349     }
2350     // This gets any part of the dense prefix that did not
2351     // fit evenly.
2352     if (region_index_start < region_index_end_dense_prefix) {
2353       q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id),
2354                                            region_index_start,
2355                                            region_index_end_dense_prefix));
2356     }
2357   }
2358 }
2359 
2360 void PSParallelCompact::enqueue_region_stealing_tasks(
2361                                      GCTaskQueue* q,
2362                                      ParallelTaskTerminator* terminator_ptr,
2363                                      uint parallel_gc_threads) {
2364   GCTraceTime(Trace, gc, phases) tm("steal task setup", &_gc_timer);
2365 
2366   // Once a thread has drained its stack, it should try to steal regions from
2367   // other threads.
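       // With a single gc thread there is no other thread to steal from, so no
       // stealing tasks are needed.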
2368   if (parallel_gc_threads > 1) {
2369     for (uint j = 0; j < parallel_gc_threads; j++) {
2370       q->enqueue(new StealRegionCompactionTask(terminator_ptr));
2371     }
2372   }
2373 }
2374 
2375 #ifdef ASSERT
2376 // Write a histogram of the number of times the block table was filled for a
2377 // region.
2378 void PSParallelCompact::write_block_fill_histogram()
2379 {
2380   LogHandle(gc, compaction) log;
2381   if (!log.is_develop()) return;
2382 
2383   ResourceMark rm;
2384   outputStream* out = log.develop_stream();
2385 
2386   typedef ParallelCompactData::RegionData rd_t;
2387   ParallelCompactData& sd = summary_data();
2388 
2389   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2390     MutableSpace* const spc = _space_info[id].space();
2391     if (spc->bottom() != spc->top()) {
2392       const rd_t* const beg = sd.addr_to_region_ptr(spc->bottom());
2393       HeapWord* const top_aligned_up = sd.region_align_up(spc->top());
2394       const rd_t* const end = sd.addr_to_region_ptr(top_aligned_up);
2395 
2396       size_t histo[5] = { 0, 0, 0, 0, 0 };
2397       const size_t histo_len = sizeof(histo) / sizeof(size_t);
2398       const size_t region_cnt = pointer_delta(end, beg, sizeof(rd_t));
2399 
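           // The last bucket aggregates regions whose block table was filled
           // histo_len - 1 (4) or more times.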
2400       for (const rd_t* cur = beg; cur < end; ++cur) {
2401         ++histo[MIN2(cur->blocks_filled_count(), histo_len - 1)];
2402       }
2403       out->print("Block fill histogram: %u %-4s" SIZE_FORMAT_W(5), id, space_names[id], region_cnt);
2404       for (size_t i = 0; i < histo_len; ++i) {
2405         out->print(" " SIZE_FORMAT_W(5) " %5.1f%%",
2406                    histo[i], 100.0 * histo[i] / region_cnt);
2407       }
2408       out->cr();
2409     }
2410   }
2411 }
2412 #endif // #ifdef ASSERT
2413 
2414 void PSParallelCompact::compact() {
2415   GCTraceTime(Trace, gc, phases) tm("compaction phase", &_gc_timer);
2416 
2417   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
2418   PSOldGen* old_gen = heap->old_gen();
2419   old_gen->start_array()->reset();
2420   uint parallel_gc_threads = heap->gc_task_manager()->workers();
2421   uint active_gc_threads = heap->gc_task_manager()->active_workers();
2422   TaskQueueSetSuper* qset = ParCompactionManager::region_array();
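       // The terminator is shared by the region stealing tasks; it lets the
       // active workers agree that all region stacks have been drained.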
2423   ParallelTaskTerminator terminator(active_gc_threads, qset);
2424 
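       // Three kinds of tasks are queued: region draining tasks, dense prefix
       // update tasks, and region stealing tasks for threads whose own region
       // stacks run dry.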
2425   GCTaskQueue* q = GCTaskQueue::create();
2426   enqueue_region_draining_tasks(q, active_gc_threads);
2427   enqueue_dense_prefix_tasks(q, active_gc_threads);
2428   enqueue_region_stealing_tasks(q, &terminator, active_gc_threads);
2429 
2430   {
2431     GCTraceTime(Trace, gc, phases) tm("par compact", &_gc_timer);
2432 
2433     gc_task_manager()->execute_and_wait(q);
2434 
2435 #ifdef  ASSERT
2436     // Verify that all regions have been processed before the deferred updates.
2437     for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2438       verify_complete(SpaceId(id));
2439     }
2440 #endif
2441   }
2442 
2443   {
2444     // Update the deferred objects, if any.  Any compaction manager can be used.
2445     GCTraceTime(Trace, gc, phases) tm("deferred updates", &_gc_timer);
2446     ParCompactionManager* cm = ParCompactionManager::manager_array(0);
2447     for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2448       update_deferred_objects(cm, SpaceId(id));
2449     }
2450   }
2451 
2452   DEBUG_ONLY(write_block_fill_histogram());
2453 }
2454 
2455 #ifdef  ASSERT
2456 void PSParallelCompact::verify_complete(SpaceId space_id) {
2457   // All regions between the space's bottom() and new_top() should be marked
2458   // as filled, and all regions between new_top() and top() should be
2459   // available (i.e., should have been emptied).
2460   ParallelCompactData& sd = summary_data();
2461   SpaceInfo si = _space_info[space_id];
2462   HeapWord* new_top_addr = sd.region_align_up(si.new_top());
2463   HeapWord* old_top_addr = sd.region_align_up(si.space()->top());
2464   const size_t beg_region = sd.addr_to_region_idx(si.space()->bottom());
2465   const size_t new_top_region = sd.addr_to_region_idx(new_top_addr);
2466   const size_t old_top_region = sd.addr_to_region_idx(old_top_addr);
2467 
2468   bool issued_a_warning = false;
2469 
2470   size_t cur_region;
2471   for (cur_region = beg_region; cur_region < new_top_region; ++cur_region) {
2472     const RegionData* const c = sd.region(cur_region);
3061 
3062 void InstanceKlass::oop_pc_update_pointers(oop obj) {
3063   oop_oop_iterate_oop_maps<true>(obj, PSParallelCompact::adjust_pointer_closure());
3064 }
3065 
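     // Mirrors (java.lang.Class instances) also embed the static fields of the
     // class they represent, so those are adjusted in addition to the regular
     // instance fields.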
3066 void InstanceMirrorKlass::oop_pc_update_pointers(oop obj) {
3067   InstanceKlass::oop_pc_update_pointers(obj);
3068 
3069   oop_oop_iterate_statics<true>(obj, PSParallelCompact::adjust_pointer_closure());
3070 }
3071 
3072 void InstanceClassLoaderKlass::oop_pc_update_pointers(oop obj) {
3073   InstanceKlass::oop_pc_update_pointers(obj);
3074 }
3075 
3076 #ifdef ASSERT
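     // Develop-level gc+ref logging of a Reference object's referent, next and
     // discovered slots: each line prints the slot address and the oop
     // currently stored there.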
3077 template <class T> static void trace_reference_gc(const char *s, oop obj,
3078                                                   T* referent_addr,
3079                                                   T* next_addr,
3080                                                   T* discovered_addr) {
3081   log_develop(gc, ref)("%s obj " PTR_FORMAT, s, p2i(obj));
3082   log_develop(gc, ref)("     referent_addr/* " PTR_FORMAT " / " PTR_FORMAT,
3083                                p2i(referent_addr), referent_addr ? p2i(oopDesc::load_decode_heap_oop(referent_addr)) : NULL);
3084   log_develop(gc, ref)("     next_addr/* " PTR_FORMAT " / " PTR_FORMAT,
3085                                p2i(next_addr), next_addr ? p2i(oopDesc::load_decode_heap_oop(next_addr)) : NULL);
3086   log_develop(gc, ref)("     discovered_addr/* " PTR_FORMAT " / " PTR_FORMAT,
3087                                p2i(discovered_addr), discovered_addr ? p2i(oopDesc::load_decode_heap_oop(discovered_addr)) : NULL);
3088 }
3089 #endif
3090 
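     // Adjust the referent, next and discovered fields of a
     // java.lang.ref.Reference; T is the (possibly compressed) oop type
     // selected by the caller.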
3091 template <class T>
3092 static void oop_pc_update_pointers_specialized(oop obj) {
3093   T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
3094   PSParallelCompact::adjust_pointer(referent_addr);
3095   T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
3096   PSParallelCompact::adjust_pointer(next_addr);
3097   T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
3098   PSParallelCompact::adjust_pointer(discovered_addr);
3099   debug_only(trace_reference_gc("InstanceRefKlass::oop_update_ptrs", obj,
3100                                 referent_addr, next_addr, discovered_addr);)
3101 }
3102 
3103 void InstanceRefKlass::oop_pc_update_pointers(oop obj) {
3104   InstanceKlass::oop_pc_update_pointers(obj);
3105 
3106   if (UseCompressedOops) {
3107     oop_pc_update_pointers_specialized<narrowOop>(obj);